/*-------------------------------------------------------------------------
*
* nbtutils.c
* Utility code for Postgres btree implementation.
*
* Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/access/nbtree/nbtutils.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <time.h>
#include "access/nbtree.h"
#include "access/reloptions.h"
#include "access/relscan.h"
#include "commands/progress.h"
#include "common/int.h"
#include "lib/qunique.h"
#include "miscadmin.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
static int _bt_compare_int(const void *va, const void *vb);
static int _bt_keep_natts(Relation rel, IndexTuple lastleft,
IndexTuple firstright, BTScanInsert itup_key);
/*
* _bt_mkscankey
* Build an insertion scan key that contains comparison data from itup
* as well as comparator routines appropriate to the key datatypes.
*
* The result is intended for use with _bt_compare() and _bt_truncate().
* Callers that don't need to fill out the insertion scankey arguments
* (e.g. they use an ad-hoc comparison routine, or only need a scankey
* for _bt_truncate()) can pass a NULL index tuple. The scankey will
* be initialized as if an "all truncated" pivot tuple was passed
* instead.
*
* Note that we may occasionally have to share lock the metapage to
* determine whether or not the keys in the index are expected to be
* unique (i.e. if this is a "heapkeyspace" index). We assume a
* heapkeyspace index when caller passes a NULL tuple, allowing index
* build callers to avoid accessing the non-existent metapage. We
* also assume that the index is _not_ allequalimage when a NULL tuple
* is passed; CREATE INDEX callers call _bt_allequalimage() to set the
* field themselves.
*/
BTScanInsert
_bt_mkscankey(Relation rel, IndexTuple itup)
{
BTScanInsert key;
ScanKey skey;
TupleDesc itupdesc;
int indnkeyatts;
int16 *indoption;
int tupnatts;
int i;
itupdesc = RelationGetDescr(rel);
indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
indoption = rel->rd_indoption;
tupnatts = itup ? BTreeTupleGetNAtts(itup, rel) : 0;
Assert(tupnatts <= IndexRelationGetNumberOfAttributes(rel));
/*
 * We'll execute the search using a scan key constructed on the key columns.
* Truncated attributes and non-key attributes are omitted from the final
* scan key.
*/
key = palloc(offsetof(BTScanInsertData, scankeys) +
sizeof(ScanKeyData) * indnkeyatts);
if (itup)
_bt_metaversion(rel, &key->heapkeyspace, &key->allequalimage);
else
{
/* Utility statement callers can set these fields themselves */
key->heapkeyspace = true;
key->allequalimage = false;
}
key->anynullkeys = false; /* initial assumption */
key->nextkey = false; /* usual case, required by btinsert */
key->backward = false; /* usual case, required by btinsert */
key->keysz = Min(indnkeyatts, tupnatts);
key->scantid = key->heapkeyspace && itup ?
BTreeTupleGetHeapTID(itup) : NULL;
skey = key->scankeys;
for (i = 0; i < indnkeyatts; i++)
{
FmgrInfo *procinfo;
Datum arg;
bool null;
int flags;
/*
* We can use the cached (default) support procs since no cross-type
* comparison can be needed.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
/*
* Key arguments built from truncated attributes (or when caller
* provides no tuple) are defensively represented as NULL values. They
* should never be used.
*/
if (i < tupnatts)
arg = index_getattr(itup, i + 1, itupdesc, &null);
else
{
arg = (Datum) 0;
null = true;
}
flags = (null ? SK_ISNULL : 0) | (indoption[i] << SK_BT_INDOPTION_SHIFT);
ScanKeyEntryInitializeWithInfo(&skey[i],
flags,
(AttrNumber) (i + 1),
InvalidStrategy,
InvalidOid,
rel->rd_indcollation[i],
procinfo,
arg);
/* Record if any key attribute is NULL (or truncated) */
if (null)
key->anynullkeys = true;
}
/*
* In NULLS NOT DISTINCT mode, we pretend that there are no null keys, so
* that full uniqueness check is done.
*/
if (rel->rd_index->indnullsnotdistinct)
key->anynullkeys = false;
return key;
}
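/*
 * Illustrative sketch (not part of the build): a minimal caller pattern,
 * assuming an already-open btree index relation "rel" and a non-truncated
 * non-pivot leaf tuple "itup" (both names are hypothetical here):
 *
 *		BTScanInsert key = _bt_mkscankey(rel, itup);
 *
 *		Assert(key->keysz == IndexRelationGetNumberOfKeyAttributes(rel));
 *		... pass key to _bt_compare()/_bt_truncate() callers ...
 *		pfree(key);
 *
 * Passing itup = NULL instead yields keysz == 0, scantid == NULL, and
 * all-NULL scankey arguments, which is only good enough for callers (such
 * as _bt_truncate()) that never look at the argument values.
 */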
/*
* free a retracement stack made by _bt_search.
*/
void
_bt_freestack(BTStack stack)
{
BTStack ostack;
while (stack != NULL)
{
ostack = stack;
stack = stack->bts_parent;
pfree(ostack);
}
}
/*
* qsort comparison function for int arrays
*/
static int
_bt_compare_int(const void *va, const void *vb)
{
int a = *((const int *) va);
int b = *((const int *) vb);
return pg_cmp_s32(a, b);
}
/*
* _bt_killitems - set LP_DEAD state for items an indexscan caller has
* told us were killed
*
* scan->opaque, referenced locally through so, contains information about the
* current page and killed tuples thereon (generally, this should only be
* called if so->numKilled > 0).
*
* Caller should not have a lock on the so->currPos page, but must hold a
* buffer pin when !so->dropPin. When we return, it still won't be locked.
* It'll continue to hold whatever pins were held before calling here.
*
* We match items by heap TID before assuming they are the right ones to set
* LP_DEAD. If the scan is one that holds a buffer pin on the target page
* continuously from initially reading the items until applying this function
* (if it is a !so->dropPin scan), VACUUM cannot have deleted any items on the
* page, so the page's TIDs can't have been recycled by now. There's no risk
* that we'll confuse a new index tuple that happens to use a recycled TID
* with a now-removed tuple with the same TID (that used to be on this same
* page). We can't rely on that during scans that drop buffer pins eagerly
* (so->dropPin scans), though, so we must condition setting LP_DEAD bits on
* the page LSN having not changed since back when _bt_readpage saw the page.
* We totally give up on setting LP_DEAD bits when the page LSN changed.
*
* We give up much less often during !so->dropPin scans, but it still happens.
* We cope with cases where items have moved right due to insertions. If an
* item has moved off the current page due to a split, we'll fail to find it
* and just give up on it.
*/
void
_bt_killitems(IndexScanDesc scan)
{
Relation rel = scan->indexRelation;
BTScanOpaque so = (BTScanOpaque) scan->opaque;
Page page;
BTPageOpaque opaque;
OffsetNumber minoff;
OffsetNumber maxoff;
int numKilled = so->numKilled;
bool killedsomething = false;
Buffer buf;
Assert(numKilled > 0);
Assert(BTScanPosIsValid(so->currPos));
Assert(scan->heapRelation != NULL); /* can't be a bitmap index scan */
/* Always invalidate so->killedItems[] before leaving so->currPos */
so->numKilled = 0;
/*
* We need to iterate through so->killedItems[] in leaf page order; the
* loop below expects this (when marking posting list tuples, at least).
* so->killedItems[] is now in whatever order the scan returned items in.
* Scrollable cursor scans might have even saved the same item/TID twice.
*
* Sort and unique-ify so->killedItems[] to deal with all this.
*/
if (numKilled > 1)
{
qsort(so->killedItems, numKilled, sizeof(int), _bt_compare_int);
numKilled = qunique(so->killedItems, numKilled, sizeof(int),
_bt_compare_int);
}
if (!so->dropPin)
{
/*
* We have held the pin on this page since we read the index tuples,
* so all we need to do is lock it. The pin will have prevented
* concurrent VACUUMs from recycling any of the TIDs on the page.
*/
Assert(BTScanPosIsPinned(so->currPos));
buf = so->currPos.buf;
_bt_lockbuf(rel, buf, BT_READ);
}
else
{
XLogRecPtr latestlsn;
Assert(!BTScanPosIsPinned(so->currPos));
Assert(RelationNeedsWAL(rel));
buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ);
latestlsn = BufferGetLSNAtomic(buf);
Assert(so->currPos.lsn <= latestlsn);
if (so->currPos.lsn != latestlsn)
{
/* Modified, give up on hinting */
_bt_relbuf(rel, buf);
return;
}
/* Unmodified, hinting is safe */
}
page = BufferGetPage(buf);
opaque = BTPageGetOpaque(page);
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
/* Iterate through so->killedItems[] in leaf page order */
for (int i = 0; i < numKilled; i++)
{
int itemIndex = so->killedItems[i];
BTScanPosItem *kitem = &so->currPos.items[itemIndex];
OffsetNumber offnum = kitem->indexOffset;
Assert(itemIndex >= so->currPos.firstItem &&
itemIndex <= so->currPos.lastItem);
Assert(i == 0 ||
offnum >= so->currPos.items[so->killedItems[i - 1]].indexOffset);
if (offnum < minoff)
continue; /* pure paranoia */
while (offnum <= maxoff)
{
ItemId iid = PageGetItemId(page, offnum);
IndexTuple ituple = (IndexTuple) PageGetItem(page, iid);
bool killtuple = false;
if (BTreeTupleIsPosting(ituple))
{
int pi = i + 1;
int nposting = BTreeTupleGetNPosting(ituple);
int j;
/*
* Note that the page may have been modified in almost any way
* since we first read it (in the !so->dropPin case), so it's
* possible that this posting list tuple wasn't a posting list
* tuple when we first encountered its heap TIDs.
*/
for (j = 0; j < nposting; j++)
{
ItemPointer item = BTreeTupleGetPostingN(ituple, j);
if (!ItemPointerEquals(item, &kitem->heapTid))
break; /* out of posting list loop */
/*
* kitem must have matching offnum when heap TIDs match,
* though only in the common case where the page can't
* have been concurrently modified
*/
Assert(kitem->indexOffset == offnum || !so->dropPin);
/*
* Read-ahead to later kitems here.
*
* We rely on the assumption that not advancing kitem here
* will prevent us from considering the posting list tuple
* fully dead by not matching its next heap TID in next
* loop iteration.
*
* If, on the other hand, this is the final heap TID in
* the posting list tuple, then tuple gets killed
* regardless (i.e. we handle the case where the last
* kitem is also the last heap TID in the last index tuple
* correctly -- posting tuple still gets killed).
*/
if (pi < numKilled)
kitem = &so->currPos.items[so->killedItems[pi++]];
}
/*
* Don't bother advancing the outermost loop's int iterator to
* avoid processing killed items that relate to the same
* offnum/posting list tuple. This micro-optimization hardly
* seems worth it. (Further iterations of the outermost loop
* will fail to match on this same posting list's first heap
* TID instead, so we'll advance to the next offnum/index
* tuple pretty quickly.)
*/
if (j == nposting)
killtuple = true;
}
else if (ItemPointerEquals(&ituple->t_tid, &kitem->heapTid))
killtuple = true;
/*
* Mark index item as dead, if it isn't already. Since this
* happens while holding a buffer lock possibly in shared mode,
* it's possible that multiple processes attempt to do this
* simultaneously, leading to multiple full-page images being sent
* to WAL (if wal_log_hints or data checksums are enabled), which
* is undesirable.
*/
if (killtuple && !ItemIdIsDead(iid))
{
/* found the item/all posting list items */
ItemIdMarkDead(iid);
killedsomething = true;
break; /* out of inner search loop */
}
offnum = OffsetNumberNext(offnum);
}
}
/*
* Since this can be redone later if needed, mark as dirty hint.
*
* Whenever we mark anything LP_DEAD, we also set the page's
* BTP_HAS_GARBAGE flag, which is likewise just a hint. (Note that we
* only rely on the page-level flag in !heapkeyspace indexes.)
*/
if (killedsomething)
{
opaque->btpo_flags |= BTP_HAS_GARBAGE;
MarkBufferDirtyHint(buf, true);
}
if (!so->dropPin)
_bt_unlockbuf(rel, buf);
else
_bt_relbuf(rel, buf);
}
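/*
 * Illustrative sketch of the sort + unique-ify step used above, shown on a
 * throwaway array (the real code operates on so->killedItems[]):
 *
 *		int		items[] = {7, 3, 7, 1};
 *		int		n = lengthof(items);
 *
 *		qsort(items, n, sizeof(int), _bt_compare_int);
 *		n = qunique(items, n, sizeof(int), _bt_compare_int);
 *		// items now begins {1, 3, 7}, and n == 3
 *
 * qunique() requires sorted input (hence the preceding qsort()); it packs
 * the distinct values at the front of the array and returns the new count.
 */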
/*
* The following routines manage a shared-memory area in which we track
* assignment of "vacuum cycle IDs" to currently-active btree vacuuming
* operations. There is a single counter which increments each time we
* start a vacuum to assign it a cycle ID. Since multiple vacuums could
* be active concurrently, we have to track the cycle ID for each active
* vacuum; this requires at most MaxBackends entries (usually far fewer).
* We assume at most one vacuum can be active for a given index.
*
* Access to the shared memory area is controlled by BtreeVacuumLock.
* In principle we could use a separate lmgr locktag for each index,
* but a single LWLock is much cheaper, and given the short time that
* the lock is ever held, the concurrency hit should be minimal.
*/
typedef struct BTOneVacInfo
{
LockRelId relid; /* global identifier of an index */
BTCycleId cycleid; /* cycle ID for its active VACUUM */
} BTOneVacInfo;
typedef struct BTVacInfo
{
BTCycleId cycle_ctr; /* cycle ID most recently assigned */
int num_vacuums; /* number of currently active VACUUMs */
int max_vacuums; /* allocated length of vacuums[] array */
BTOneVacInfo vacuums[FLEXIBLE_ARRAY_MEMBER];
} BTVacInfo;
static BTVacInfo *btvacinfo;
/*
* _bt_vacuum_cycleid --- get the active vacuum cycle ID for an index,
* or zero if there is no active VACUUM
*
* Note: for correct interlocking, the caller must already hold pin and
* exclusive lock on each buffer it will store the cycle ID into. This
* ensures that even if a VACUUM starts immediately afterwards, it cannot
* process those pages until the page split is complete.
*/
BTCycleId
_bt_vacuum_cycleid(Relation rel)
{
BTCycleId result = 0;
int i;
/* Share lock is enough since this is a read-only operation */
LWLockAcquire(BtreeVacuumLock, LW_SHARED);
for (i = 0; i < btvacinfo->num_vacuums; i++)
{
BTOneVacInfo *vac = &btvacinfo->vacuums[i];
if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
{
result = vac->cycleid;
break;
}
}
LWLockRelease(BtreeVacuumLock);
return result;
}
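/*
 * Sketch of the intended call site (the real caller is the page split code
 * in nbtinsert.c; details here are schematic): while still holding
 * exclusive locks on both halves of a just-split page, the splitter stamps
 * the pages so that a concurrent VACUUM using that cycle ID knows it must
 * revisit them:
 *
 *		BTCycleId	cycleid = _bt_vacuum_cycleid(rel);
 *
 *		lopaque->btpo_cycleid = cycleid;
 *		ropaque->btpo_cycleid = cycleid;
 *
 * where lopaque/ropaque stand for the two pages' BTPageOpaque pointers.  A
 * zero result means no VACUUM is currently active for the index.
 */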
/*
* _bt_start_vacuum --- assign a cycle ID to a just-starting VACUUM operation
*
* Note: the caller must guarantee that it will eventually call
* _bt_end_vacuum, else we'll permanently leak an array slot. To ensure
* that this happens even in elog(FATAL) scenarios, the appropriate coding
* is not just a PG_TRY, but
* PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel))
*/
BTCycleId
_bt_start_vacuum(Relation rel)
{
BTCycleId result;
int i;
BTOneVacInfo *vac;
LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
/*
* Assign the next cycle ID, being careful to avoid zero as well as the
* reserved high values.
*/
result = ++(btvacinfo->cycle_ctr);
if (result == 0 || result > MAX_BT_CYCLE_ID)
result = btvacinfo->cycle_ctr = 1;
/* Let's just make sure there's no entry already for this index */
for (i = 0; i < btvacinfo->num_vacuums; i++)
{
vac = &btvacinfo->vacuums[i];
if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
{
/*
* Unlike most places in the backend, we have to explicitly
* release our LWLock before throwing an error. This is because
* we expect _bt_end_vacuum() to be called before transaction
* abort cleanup can run to release LWLocks.
*/
LWLockRelease(BtreeVacuumLock);
elog(ERROR, "multiple active vacuums for index \"%s\"",
RelationGetRelationName(rel));
}
}
/* OK, add an entry */
if (btvacinfo->num_vacuums >= btvacinfo->max_vacuums)
{
LWLockRelease(BtreeVacuumLock);
elog(ERROR, "out of btvacinfo slots");
}
vac = &btvacinfo->vacuums[btvacinfo->num_vacuums];
vac->relid = rel->rd_lockInfo.lockRelId;
vac->cycleid = result;
btvacinfo->num_vacuums++;
LWLockRelease(BtreeVacuumLock);
return result;
}
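/*
 * Sketch of the caller-side pattern described above (the btree bulk-delete
 * code does roughly this; the body shown here is schematic):
 *
 *		BTCycleId	cycleid;
 *
 *		PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
 *		{
 *			cycleid = _bt_start_vacuum(rel);
 *			... scan the index, using cycleid to detect concurrent splits ...
 *		}
 *		PG_END_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
 *		_bt_end_vacuum(rel);
 *
 * The cleanup callback guarantees the btvacinfo slot is released even on
 * elog(FATAL), which a bare PG_TRY would not.
 */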
/*
* _bt_end_vacuum --- mark a btree VACUUM operation as done
*
* Note: this is deliberately coded not to complain if no entry is found;
* this allows the caller to put PG_TRY around the start_vacuum operation.
*/
void
_bt_end_vacuum(Relation rel)
{
int i;
LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
/* Find the array entry */
for (i = 0; i < btvacinfo->num_vacuums; i++)
{
BTOneVacInfo *vac = &btvacinfo->vacuums[i];
if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
{
/* Remove it by shifting down the last entry */
*vac = btvacinfo->vacuums[btvacinfo->num_vacuums - 1];
btvacinfo->num_vacuums--;
break;
}
}
LWLockRelease(BtreeVacuumLock);
}
/*
* _bt_end_vacuum wrapped as an on_shmem_exit callback function
*/
void
_bt_end_vacuum_callback(int code, Datum arg)
{
_bt_end_vacuum((Relation) DatumGetPointer(arg));
}
/*
* BTreeShmemSize --- report amount of shared memory space needed
*/
Size
BTreeShmemSize(void)
{
Size size;
size = offsetof(BTVacInfo, vacuums);
size = add_size(size, mul_size(MaxBackends, sizeof(BTOneVacInfo)));
return size;
}
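/*
 * For example, with MaxBackends = 100 this reserves
 * offsetof(BTVacInfo, vacuums) + 100 * sizeof(BTOneVacInfo) bytes: the
 * fixed header plus one vacuum-tracking slot per backend.  add_size() and
 * mul_size() are used purely to error out cleanly on arithmetic overflow.
 */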
/*
* BTreeShmemInit --- initialize this module's shared memory
*/
void
BTreeShmemInit(void)
{
bool found;
btvacinfo = (BTVacInfo *) ShmemInitStruct("BTree Vacuum State",
BTreeShmemSize(),
&found);
if (!IsUnderPostmaster)
{
/* Initialize shared memory area */
Assert(!found);
/*
* It doesn't really matter what the cycle counter starts at, but
* having it always start the same doesn't seem good. Seed with
* low-order bits of time() instead.
*/
btvacinfo->cycle_ctr = (BTCycleId) time(NULL);
btvacinfo->num_vacuums = 0;
btvacinfo->max_vacuums = MaxBackends;
}
else
Assert(found);
}
bytea *
btoptions(Datum reloptions, bool validate)
{
static const relopt_parse_elt tab[] = {
{"fillfactor", RELOPT_TYPE_INT, offsetof(BTOptions, fillfactor)},
{"vacuum_cleanup_index_scale_factor", RELOPT_TYPE_REAL,
offsetof(BTOptions, vacuum_cleanup_index_scale_factor)},
{"deduplicate_items", RELOPT_TYPE_BOOL,
offsetof(BTOptions, deduplicate_items)}
};
return (bytea *) build_reloptions(reloptions, validate,
RELOPT_KIND_BTREE,
sizeof(BTOptions),
tab, lengthof(tab));
}
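/*
 * Illustrative sketch (schematic): the bytea built here ends up cached as
 * rel->rd_options, so nbtree code can read the parsed settings with a
 * simple cast, e.g.
 *
 *		BTOptions  *opts = (BTOptions *) rel->rd_options;
 *		int			fillfactor = opts ? opts->fillfactor : BTREE_DEFAULT_FILLFACTOR;
 *
 * In practice the accessor macros in nbtree.h (BTGetFillFactor() and
 * friends) wrap this same NULL-check-plus-default dance.
 */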
/*
* btproperty() -- Check boolean properties of indexes.
*
* This is optional, but handling AMPROP_RETURNABLE here saves opening the rel
* to call btcanreturn.
*/
bool
btproperty(Oid index_oid, int attno,
IndexAMProperty prop, const char *propname,
bool *res, bool *isnull)
{
switch (prop)
{
case AMPROP_RETURNABLE:
/* answer only for columns, not AM or whole index */
if (attno == 0)
return false;
/* otherwise, btree can always return data */
*res = true;
return true;
default:
return false; /* punt to generic code */
}
}
/*
* btbuildphasename() -- Return name of index build phase.
*/
char *
btbuildphasename(int64 phasenum)
{
switch (phasenum)
{
case PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE:
return "initializing";
case PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN:
return "scanning table";
case PROGRESS_BTREE_PHASE_PERFORMSORT_1:
return "sorting live tuples";
case PROGRESS_BTREE_PHASE_PERFORMSORT_2:
return "sorting dead tuples";
case PROGRESS_BTREE_PHASE_LEAF_LOAD:
return "loading tuples in tree";
default:
return NULL;
}
}
/*
* _bt_truncate() -- create tuple without unneeded suffix attributes.
*
* Returns truncated pivot index tuple allocated in caller's memory context,
* with key attributes copied from caller's firstright argument. If rel is
* an INCLUDE index, non-key attributes will definitely be truncated away,
* since they're not part of the key space. More aggressive suffix
* truncation can take place when it's clear that the returned tuple does not
* need one or more suffix key attributes. We only need to keep firstright
* attributes up to and including the first non-lastleft-equal attribute.
* Caller's insertion scankey is used to compare the tuples; the scankey's
* argument values are not considered here.
*
* Note that returned tuple's t_tid offset will hold the number of attributes
* present, so the original item pointer offset is not represented. Caller
* should only change truncated tuple's downlink. Note also that truncated
* key attributes are treated as containing "minus infinity" values by
* _bt_compare().
*
* In the worst case (when a heap TID must be appended to distinguish lastleft
* from firstright), the size of the returned tuple is the size of firstright
* plus the size of an additional MAXALIGN()'d item pointer. This guarantee
* is important, since callers need to stay under the 1/3 of a page
* restriction on tuple size. If this routine is ever taught to truncate
* within an attribute/datum, it will need to avoid returning an enlarged
* tuple to caller when truncation + TOAST compression ends up enlarging the
* final datum.
*/
IndexTuple
_bt_truncate(Relation rel, IndexTuple lastleft, IndexTuple firstright,
BTScanInsert itup_key)
{
TupleDesc itupdesc = RelationGetDescr(rel);
int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
int keepnatts;
IndexTuple pivot;
IndexTuple tidpivot;
ItemPointer pivotheaptid;
Size newsize;
/*
* We should only ever truncate non-pivot tuples from leaf pages. It's
* never okay to truncate when splitting an internal page.
*/
Assert(!BTreeTupleIsPivot(lastleft) && !BTreeTupleIsPivot(firstright));
/* Determine how many attributes must be kept in truncated tuple */
keepnatts = _bt_keep_natts(rel, lastleft, firstright, itup_key);
#ifdef DEBUG_NO_TRUNCATE
/* Force truncation to be ineffective for testing purposes */
keepnatts = nkeyatts + 1;
#endif
pivot = index_truncate_tuple(itupdesc, firstright,
Min(keepnatts, nkeyatts));
if (BTreeTupleIsPosting(pivot))
{
/*
* index_truncate_tuple() just returns a straight copy of firstright
* when it has no attributes to truncate. When that happens, we may
* need to truncate away a posting list here instead.
*/
Assert(keepnatts == nkeyatts || keepnatts == nkeyatts + 1);
Assert(IndexRelationGetNumberOfAttributes(rel) == nkeyatts);
pivot->t_info &= ~INDEX_SIZE_MASK;
pivot->t_info |= MAXALIGN(BTreeTupleGetPostingOffset(firstright));
}
/*
* If there is a distinguishing key attribute within pivot tuple, we're
* done
*/
if (keepnatts <= nkeyatts)
{
BTreeTupleSetNAtts(pivot, keepnatts, false);
return pivot;
}
/*
* We have to store a heap TID in the new pivot tuple, since no non-TID
* key attribute value in firstright distinguishes the right side of the
* split from the left side. nbtree conceptualizes this case as an
* inability to truncate away any key attributes, since heap TID is
* treated as just another key attribute (despite lacking a pg_attribute
* entry).
*
* Use enlarged space that holds a copy of pivot. We need the extra space
* to store a heap TID at the end (using the special pivot tuple
* representation). Note that the original pivot already has firstright's
* possible posting list/non-key attribute values removed at this point.
*/
newsize = MAXALIGN(IndexTupleSize(pivot)) + MAXALIGN(sizeof(ItemPointerData));
tidpivot = palloc0(newsize);
memcpy(tidpivot, pivot, MAXALIGN(IndexTupleSize(pivot)));
/* Cannot leak memory here */
pfree(pivot);
/*
* Store all of firstright's key attribute values plus a tiebreaker heap
* TID value in enlarged pivot tuple
*/
tidpivot->t_info &= ~INDEX_SIZE_MASK;
tidpivot->t_info |= newsize;
BTreeTupleSetNAtts(tidpivot, nkeyatts, true);
pivotheaptid = BTreeTupleGetHeapTID(tidpivot);
/*
* Lehman & Yao use lastleft as the leaf high key in all cases, but don't
* consider suffix truncation. It seems like a good idea to follow that
* example in cases where no truncation takes place -- use lastleft's heap
* TID. (This is also the closest value to negative infinity that's
* legally usable.)
*/
ItemPointerCopy(BTreeTupleGetMaxHeapTID(lastleft), pivotheaptid);
/*
* We're done. Assert() that heap TID invariants hold before returning.
*
* Lehman and Yao require that the downlink to the right page, which is to
 * be inserted into the parent page in the second phase of a page split, be
* a strict lower bound on items on the right page, and a non-strict upper
* bound for items on the left page. Assert that heap TIDs follow these
* invariants, since a heap TID value is apparently needed as a
* tiebreaker.
*/
#ifndef DEBUG_NO_TRUNCATE
Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(lastleft),
BTreeTupleGetHeapTID(firstright)) < 0);
Assert(ItemPointerCompare(pivotheaptid,
BTreeTupleGetHeapTID(lastleft)) >= 0);
Assert(ItemPointerCompare(pivotheaptid,
BTreeTupleGetHeapTID(firstright)) < 0);
#else
/*
* Those invariants aren't guaranteed to hold for lastleft + firstright
* heap TID attribute values when they're considered here only because
* DEBUG_NO_TRUNCATE is defined (a heap TID is probably not actually
* needed as a tiebreaker). DEBUG_NO_TRUNCATE must therefore use a heap
* TID value that always works as a strict lower bound for items to the
* right. In particular, it must avoid using firstright's leading key
* attribute values along with lastleft's heap TID value when lastleft's
* TID happens to be greater than firstright's TID.
*/
ItemPointerCopy(BTreeTupleGetHeapTID(firstright), pivotheaptid);
/*
* Pivot heap TID should never be fully equal to firstright. Note that
* the pivot heap TID will still end up equal to lastleft's heap TID when
* that's the only usable value.
*/
ItemPointerSetOffsetNumber(pivotheaptid,
OffsetNumberPrev(ItemPointerGetOffsetNumber(pivotheaptid)));
Assert(ItemPointerCompare(pivotheaptid,
BTreeTupleGetHeapTID(firstright)) < 0);
#endif
return tidpivot;
}
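/*
 * Worked example (schematic, independent of any particular opclass): for an
 * index on (a, b, c), a split point with
 *
 *		lastleft   = (1, 5, 9)
 *		firstright = (1, 7, 2)
 *
 * first differs on "b", so _bt_keep_natts() returns 2 and the new pivot is
 * (1, 7, -inf): attribute "c" is truncated away and treated as minus
 * infinity by _bt_compare().  Only when every key attribute is equal across
 * the split point (possible in non-unique indexes) does keepnatts come back
 * as nkeyatts + 1, forcing the heap TID tiebreaker path above.
 */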
/*
* _bt_keep_natts - how many key attributes to keep when truncating.
*
* Caller provides two tuples that enclose a split point. Caller's insertion
* scankey is used to compare the tuples; the scankey's argument values are
* not considered here.
*
* This can return a number of attributes that is one greater than the
* number of key attributes for the index relation. This indicates that the
* caller must use a heap TID as a unique-ifier in new pivot tuple.
*/
static int
_bt_keep_natts(Relation rel, IndexTuple lastleft, IndexTuple firstright,
BTScanInsert itup_key)
{
int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
TupleDesc itupdesc = RelationGetDescr(rel);
int keepnatts;
ScanKey scankey;
/*
* _bt_compare() treats truncated key attributes as having the value minus
* infinity, which would break searches within !heapkeyspace indexes. We
* must still truncate away non-key attribute values, though.
*/
if (!itup_key->heapkeyspace)
return nkeyatts;
scankey = itup_key->scankeys;
keepnatts = 1;
for (int attnum = 1; attnum <= nkeyatts; attnum++, scankey++)
{
Datum datum1,
datum2;
bool isNull1,
isNull2;
datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
if (isNull1 != isNull2)
break;
if (!isNull1 &&
DatumGetInt32(FunctionCall2Coll(&scankey->sk_func,
scankey->sk_collation,
datum1,
datum2)) != 0)
break;
keepnatts++;
}
/*
* Assert that _bt_keep_natts_fast() agrees with us in passing. This is
* expected in an allequalimage index.
*/
Assert(!itup_key->allequalimage ||
keepnatts == _bt_keep_natts_fast(rel, lastleft, firstright));
return keepnatts;
}
/*
* _bt_keep_natts_fast - fast bitwise variant of _bt_keep_natts.
*
* This is exported so that a candidate split point can have its effect on
* suffix truncation inexpensively evaluated ahead of time when finding a
* split location. A naive bitwise approach to datum comparisons is used to
* save cycles.
*
* The approach taken here usually provides the same answer as _bt_keep_natts
* will (for the same pair of tuples from a heapkeyspace index), since the
* majority of btree opclasses can never indicate that two datums are equal
* unless they're bitwise equal after detoasting. When an index only has
* "equal image" columns, routine is guaranteed to give the same result as
*
* Callers can rely on the fact that attributes considered equal here are
* definitely also equal according to _bt_keep_natts, even when the index uses
* an opclass or collation that is not "allequalimage"/deduplication-safe.
* This weaker guarantee is good enough for nbtsplitloc.c caller, since false
* negatives generally only have the effect of making leaf page splits use a
* more balanced split point.
*/
int
_bt_keep_natts_fast(Relation rel, IndexTuple lastleft, IndexTuple firstright)
{
TupleDesc itupdesc = RelationGetDescr(rel);
int keysz = IndexRelationGetNumberOfKeyAttributes(rel);
int keepnatts;
keepnatts = 1;
for (int attnum = 1; attnum <= keysz; attnum++)
{
Datum datum1,
datum2;
bool isNull1,
isNull2;
CompactAttribute *att;
datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
att = TupleDescCompactAttr(itupdesc, attnum - 1);
if (isNull1 != isNull2)
break;
if (!isNull1 &&
!datum_image_eq(datum1, datum2, att->attbyval, att->attlen))
break;
keepnatts++;
}
return keepnatts;
}
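/*
 * Example of a tolerated false negative: the numeric values 1.0 and 1.00
 * compare as equal under the btree opclass but differ on disk (display
 * scale), so datum_image_eq() reports them unequal and this routine stops
 * one attribute earlier than _bt_keep_natts() would.  Per the header
 * comment, that only nudges nbtsplitloc.c toward a different (more
 * balanced) split point; it never yields an incorrect pivot, because the
 * authoritative truncation still goes through _bt_keep_natts().
 */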
/*
* _bt_check_natts() -- Verify tuple has expected number of attributes.
*
* Returns value indicating if the expected number of attributes were found
* for a particular offset on page. This can be used as a general purpose
* sanity check.
*
* Testing a tuple directly with BTreeTupleGetNAtts() should generally be
* preferred to calling here. That's usually more convenient, and is always
* more explicit. Call here instead when offnum's tuple may be a negative
* infinity tuple that uses the pre-v11 on-disk representation, or when a low
* context check is appropriate. This routine is as strict as possible about
* what is expected on each version of btree.
*/
bool
_bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
{
int16 natts = IndexRelationGetNumberOfAttributes(rel);
int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
BTPageOpaque opaque = BTPageGetOpaque(page);
IndexTuple itup;
int tupnatts;
/*
* We cannot reliably test a deleted or half-dead page, since they have
* dummy high keys
*/
if (P_IGNORE(opaque))
return true;
Assert(offnum >= FirstOffsetNumber &&
offnum <= PageGetMaxOffsetNumber(page));
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
tupnatts = BTreeTupleGetNAtts(itup, rel);
/* !heapkeyspace indexes do not support deduplication */
if (!heapkeyspace && BTreeTupleIsPosting(itup))
return false;
/* Posting list tuples should never have "pivot heap TID" bit set */
if (BTreeTupleIsPosting(itup) &&
(ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) &
BT_PIVOT_HEAP_TID_ATTR) != 0)
return false;
/* INCLUDE indexes do not support deduplication */
if (natts != nkeyatts && BTreeTupleIsPosting(itup))
return false;
if (P_ISLEAF(opaque))
{
if (offnum >= P_FIRSTDATAKEY(opaque))
{
/*
* Non-pivot tuple should never be explicitly marked as a pivot
* tuple
*/
if (BTreeTupleIsPivot(itup))
return false;
/*
* Leaf tuples that are not the page high key (non-pivot tuples)
* should never be truncated. (Note that tupnatts must have been
* inferred, even with a posting list tuple, because only pivot
* tuples store tupnatts directly.)
*/
return tupnatts == natts;
}
else
{
/*
* Rightmost page doesn't contain a page high key, so tuple was
* checked above as ordinary leaf tuple
*/
Assert(!P_RIGHTMOST(opaque));
/*
* !heapkeyspace high key tuple contains only key attributes. Note
* that tupnatts will only have been explicitly represented in
* !heapkeyspace indexes that happen to have non-key attributes.
*/
if (!heapkeyspace)
return tupnatts == nkeyatts;
/* Use generic heapkeyspace pivot tuple handling */
}
}
else /* !P_ISLEAF(opaque) */
{
if (offnum == P_FIRSTDATAKEY(opaque))
{
/*
* The first tuple on any internal page (possibly the first after
* its high key) is its negative infinity tuple. Negative
* infinity tuples are always truncated to zero attributes. They
* are a particular kind of pivot tuple.
*/
if (heapkeyspace)
return tupnatts == 0;
/*
* The number of attributes won't be explicitly represented if the
* negative infinity tuple was generated during a page split that
* occurred with a version of Postgres before v11. There must be
* a problem when there is an explicit representation that is
* non-zero, or when there is no explicit representation and the
* tuple is evidently not a pre-pg_upgrade tuple.
*
* Prior to v11, downlinks always had P_HIKEY as their offset.
* Accept that as an alternative indication of a valid
* !heapkeyspace negative infinity tuple.
*/
return tupnatts == 0 ||
ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY;
}
else
{
/*
* !heapkeyspace downlink tuple with separator key contains only
* key attributes. Note that tupnatts will only have been
* explicitly represented in !heapkeyspace indexes that happen to
* have non-key attributes.
*/
if (!heapkeyspace)
return tupnatts == nkeyatts;
/* Use generic heapkeyspace pivot tuple handling */
}
}
/* Handle heapkeyspace pivot tuples (excluding minus infinity items) */
Assert(heapkeyspace);
/*
* Explicit representation of the number of attributes is mandatory with
* heapkeyspace index pivot tuples, regardless of whether or not there are
* non-key attributes.
*/
if (!BTreeTupleIsPivot(itup))
return false;
/* Pivot tuple should not use posting list representation (redundant) */
if (BTreeTupleIsPosting(itup))
return false;
/*
* Heap TID is a tiebreaker key attribute, so it cannot be untruncated
* when any other key attribute is truncated
*/
if (BTreeTupleGetHeapTID(itup) != NULL && tupnatts != nkeyatts)
return false;
/*
* Pivot tuple must have at least one untruncated key attribute (minus
* infinity pivot tuples are the only exception). Pivot tuples can never
* represent that there is a value present for a key attribute that
* exceeds pg_index.indnkeyatts for the index.
*/
return tupnatts > 0 && tupnatts <= nkeyatts;
}
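/*
 * Summary of what the checks above accept in a heapkeyspace index:
 *
 *		leaf non-pivot tuple			tupnatts == natts
 *		leaf high key (pivot)			1 <= tupnatts <= nkeyatts
 *		internal negative infinity		tupnatts == 0
 *		other internal pivot tuples		1 <= tupnatts <= nkeyatts
 *
 * plus the rule that a pivot may retain a heap TID only when no other key
 * attribute was truncated (tupnatts == nkeyatts).
 */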
/*
* _bt_check_third_page() -- check whether tuple fits on a btree page at all.
*
* We actually need to be able to fit three items on every page, so restrict
* any one item to 1/3 the per-page available space. Note that itemsz should
* not include the ItemId overhead.
*
* It might be useful to apply TOAST methods rather than throw an error here.
* Using out of line storage would break assumptions made by suffix truncation
* and by contrib/amcheck, though.
*/
void
_bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace,
Page page, IndexTuple newtup)
{
Size itemsz;
BTPageOpaque opaque;
itemsz = MAXALIGN(IndexTupleSize(newtup));
/* Double check item size against limit */
if (itemsz <= BTMaxItemSize)
return;
/*
* Tuple is probably too large to fit on page, but it's possible that the
* index uses version 2 or version 3, or that page is an internal page, in
* which case a slightly higher limit applies.
*/
if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid)
return;
/*
* Internal page insertions cannot fail here, because that would mean that
* an earlier leaf level insertion that should have failed didn't
*/
opaque = BTPageGetOpaque(page);
if (!P_ISLEAF(opaque))
elog(ERROR, "cannot insert oversized tuple of size %zu on internal page of index \"%s\"",
itemsz, RelationGetRelationName(rel));
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"",
itemsz,
needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION,
needheaptidspace ? BTMaxItemSize : BTMaxItemSizeNoHeapTid,
RelationGetRelationName(rel)),
errdetail("Index row references tuple (%u,%u) in relation \"%s\".",
ItemPointerGetBlockNumber(BTreeTupleGetHeapTID(newtup)),
ItemPointerGetOffsetNumber(BTreeTupleGetHeapTID(newtup)),
RelationGetRelationName(heap)),
errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
"Consider a function index of an MD5 hash of the value, "
"or use full text indexing."),
errtableconstraint(heap, RelationGetRelationName(rel))));
}
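/*
 * For reference, with the default 8kB block size these limits come out to
 * 2704 bytes (BTMaxItemSize, btree version 4, which reserves room for a
 * pivot tuple's heap TID tiebreaker) and 2712 bytes
 * (BTMaxItemSizeNoHeapTid, versions 2 and 3).  The authoritative
 * definitions are the macros in nbtree.h; the numbers here are only
 * illustrative.
 */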
/*
* Are all attributes in rel "equality is image equality" attributes?
*
* We use each attribute's BTEQUALIMAGE_PROC opclass procedure. If any
* opclass either lacks a BTEQUALIMAGE_PROC procedure or returns false, we
* return false; otherwise we return true.
*
* Returned boolean value is stored in index metapage during index builds.
* Deduplication can only be used when we return true.
*/
bool
_bt_allequalimage(Relation rel, bool debugmessage)
{
bool allequalimage = true;
/* INCLUDE indexes can never support deduplication */
if (IndexRelationGetNumberOfAttributes(rel) !=
IndexRelationGetNumberOfKeyAttributes(rel))
return false;
for (int i = 0; i < IndexRelationGetNumberOfKeyAttributes(rel); i++)
{
Oid opfamily = rel->rd_opfamily[i];
Oid opcintype = rel->rd_opcintype[i];
Oid collation = rel->rd_indcollation[i];
Oid equalimageproc;
equalimageproc = get_opfamily_proc(opfamily, opcintype, opcintype,
BTEQUALIMAGE_PROC);
/*
* If there is no BTEQUALIMAGE_PROC then deduplication is assumed to
* be unsafe. Otherwise, actually call proc and see what it says.
*/
if (!OidIsValid(equalimageproc) ||
!DatumGetBool(OidFunctionCall1Coll(equalimageproc, collation,
ObjectIdGetDatum(opcintype))))
{
allequalimage = false;
break;
}
}
if (debugmessage)
{
if (allequalimage)
elog(DEBUG1, "index \"%s\" can safely use deduplication",
RelationGetRelationName(rel));
else
elog(DEBUG1, "index \"%s\" cannot use deduplication",
RelationGetRelationName(rel));
}
return allequalimage;
}
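/*
 * For illustration: most built-in opclasses (int4_ops, text_ops under a
 * deterministic collation, etc.) are "equal image", since equal datums are
 * bitwise equal after detoasting, so their BTEQUALIMAGE_PROC returns true.
 * Counterexamples include numeric (1.0 and 1.00 compare equal but are
 * stored with different display scales) and text under a nondeterministic
 * collation such as a case-insensitive one ('abc' = 'ABC'); for those the
 * proc is absent or returns false, so the metapage field ends up false and
 * deduplication is never used for the index.
 */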