diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c
index 591e0a3e46..0424e0eb27 100644
--- a/contrib/amcheck/verify_nbtree.c
+++ b/contrib/amcheck/verify_nbtree.c
@@ -155,11 +155,14 @@ static inline bool invariant_g_offset(BtreeCheckState *state,
                       BTScanInsert key,
                       OffsetNumber lowerbound);
 static inline bool invariant_l_nontarget_offset(BtreeCheckState *state,
                              BTScanInsert key,
+                             BlockNumber nontargetblock,
                              Page nontarget,
                              OffsetNumber upperbound);
 static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
 static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
                             IndexTuple itup);
+static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
+                    Page page, OffsetNumber offset);
 static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state,
                               IndexTuple itup, bool nonpivot);
@@ -710,7 +713,9 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
             ItemId      itemid;
 
             /* Internal page -- downlink gets leftmost on next level */
-            itemid = PageGetItemId(state->target, P_FIRSTDATAKEY(opaque));
+            itemid = PageGetItemIdCareful(state, state->targetblock,
+                                          state->target,
+                                          P_FIRSTDATAKEY(opaque));
             itup = (IndexTuple) PageGetItem(state->target, itemid);
             nextleveldown.leftmost = BTreeInnerTupleGetDownLink(itup);
             nextleveldown.level = opaque->btpo.level - 1;
@@ -837,26 +842,29 @@ bt_target_page_check(BtreeCheckState *state)
      * Check the number of attributes in high key. Note, rightmost page
      * doesn't contain a high key, so nothing to check
      */
-    if (!P_RIGHTMOST(topaque) &&
-        !_bt_check_natts(state->rel, state->heapkeyspace, state->target,
-                         P_HIKEY))
+    if (!P_RIGHTMOST(topaque))
     {
         ItemId      itemid;
         IndexTuple  itup;
 
-        itemid = PageGetItemId(state->target, P_HIKEY);
-        itup = (IndexTuple) PageGetItem(state->target, itemid);
-
-        ereport(ERROR,
-                (errcode(ERRCODE_INDEX_CORRUPTED),
-                 errmsg("wrong number of high key index tuple attributes in index \"%s\"",
-                        RelationGetRelationName(state->rel)),
-                 errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%X.",
-                                    state->targetblock,
-                                    BTreeTupleGetNAtts(itup, state->rel),
-                                    P_ISLEAF(topaque) ? "heap" : "index",
-                                    (uint32) (state->targetlsn >> 32),
-                                    (uint32) state->targetlsn)));
+        /* Verify line pointer before checking tuple */
+        itemid = PageGetItemIdCareful(state, state->targetblock,
+                                      state->target, P_HIKEY);
+        if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
+                             P_HIKEY))
+        {
+            itup = (IndexTuple) PageGetItem(state->target, itemid);
+            ereport(ERROR,
+                    (errcode(ERRCODE_INDEX_CORRUPTED),
+                     errmsg("wrong number of high key index tuple attributes in index \"%s\"",
+                            RelationGetRelationName(state->rel)),
+                     errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%X.",
+                                        state->targetblock,
+                                        BTreeTupleGetNAtts(itup, state->rel),
+                                        P_ISLEAF(topaque) ? "heap" : "index",
+                                        (uint32) (state->targetlsn >> 32),
+                                        (uint32) state->targetlsn)));
+        }
     }
 
     /*
@@ -876,7 +884,8 @@ bt_target_page_check(BtreeCheckState *state)
 
         CHECK_FOR_INTERRUPTS();
 
-        itemid = PageGetItemId(state->target, offset);
+        itemid = PageGetItemIdCareful(state, state->targetblock,
+                                      state->target, offset);
         itup = (IndexTuple) PageGetItem(state->target, itemid);
         tupsize = IndexTupleSize(itup);
 
@@ -1121,7 +1130,9 @@
                              OffsetNumberNext(offset));
 
             /* Reuse itup to get pointed-to heap location of second item */
-            itemid = PageGetItemId(state->target, OffsetNumberNext(offset));
+            itemid = PageGetItemIdCareful(state, state->targetblock,
+                                          state->target,
+                                          OffsetNumberNext(offset));
             itup = (IndexTuple) PageGetItem(state->target, itemid);
             nhtid = psprintf("(%u,%u)",
                              ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)),
@@ -1406,7 +1417,8 @@ bt_right_page_check_scankey(BtreeCheckState *state)
     if (P_ISLEAF(opaque) && nline >= P_FIRSTDATAKEY(opaque))
     {
         /* Return first data item (if any) */
-        rightitem = PageGetItemId(rightpage, P_FIRSTDATAKEY(opaque));
+        rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
+                                         P_FIRSTDATAKEY(opaque));
     }
     else if (!P_ISLEAF(opaque) &&
              nline >= OffsetNumberNext(P_FIRSTDATAKEY(opaque)))
@@ -1415,8 +1427,8 @@
          * Return first item after the internal page's "negative infinity"
          * item
          */
-        rightitem = PageGetItemId(rightpage,
-                                  OffsetNumberNext(P_FIRSTDATAKEY(opaque)));
+        rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
+                                         OffsetNumberNext(P_FIRSTDATAKEY(opaque)));
     }
     else
     {
@@ -1576,7 +1588,8 @@ bt_downlink_check(BtreeCheckState *state, BTScanInsert targetkey,
         if (offset_is_negative_infinity(copaque, offset))
             continue;
 
-        if (!invariant_l_nontarget_offset(state, targetkey, child, offset))
+        if (!invariant_l_nontarget_offset(state, targetkey, childblock, child,
+                                          offset))
             ereport(ERROR,
                     (errcode(ERRCODE_INDEX_CORRUPTED),
                      errmsg("down-link lower bound invariant violated for index \"%s\"",
@@ -1687,7 +1700,8 @@ bt_downlink_missing_check(BtreeCheckState *state)
          RelationGetRelationName(state->rel));
 
     level = topaque->btpo.level;
-    itemid = PageGetItemId(state->target, P_FIRSTDATAKEY(topaque));
+    itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
+                                  P_FIRSTDATAKEY(topaque));
     itup = (IndexTuple) PageGetItem(state->target, itemid);
     childblk = BTreeInnerTupleGetDownLink(itup);
     for (;;)
@@ -1711,7 +1725,8 @@ bt_downlink_missing_check(BtreeCheckState *state)
                                         level - 1, copaque->btpo.level)));
 
         level = copaque->btpo.level;
-        itemid = PageGetItemId(child, P_FIRSTDATAKEY(copaque));
+        itemid = PageGetItemIdCareful(state, childblk, child,
+                                      P_FIRSTDATAKEY(copaque));
         itup = (IndexTuple) PageGetItem(child, itemid);
         childblk = BTreeInnerTupleGetDownLink(itup);
         /* Be slightly more pro-active in freeing this memory, just in case */
@@ -1760,7 +1775,7 @@ bt_downlink_missing_check(BtreeCheckState *state)
      */
     if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque))
     {
-        itemid = PageGetItemId(child, P_HIKEY);
+        itemid = PageGetItemIdCareful(state, childblk, child, P_HIKEY);
         itup = (IndexTuple) PageGetItem(child, itemid);
         if (BTreeTupleGetTopParent(itup) == state->targetblock)
             return;
@@ -2087,6 +2102,8 @@ offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset)
  * Does the invariant hold that the key is strictly less than a given upper
  * bound offset item?
  *
+ * Verifies line pointer on behalf of caller.
+ *
  * If this function returns false, convention is that caller throws error due
  * to corruption.
  */
@@ -2094,10 +2111,14 @@ static inline bool
 invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
                    OffsetNumber upperbound)
 {
+    ItemId      itemid;
     int32       cmp;
 
     Assert(key->pivotsearch);
 
+    /* Verify line pointer before checking tuple */
+    itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
+                                  upperbound);
     /* pg_upgrade'd indexes may legally have equal sibling tuples */
     if (!key->heapkeyspace)
         return invariant_leq_offset(state, key, upperbound);
@@ -2116,13 +2137,11 @@ invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
     if (cmp == 0)
     {
         BTPageOpaque topaque;
-        ItemId      itemid;
         IndexTuple  ritup;
         int         uppnkeyatts;
         ItemPointer rheaptid;
         bool        nonpivot;
 
-        itemid = PageGetItemId(state->target, upperbound);
         ritup = (IndexTuple) PageGetItem(state->target, itemid);
         topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
         nonpivot = P_ISLEAF(topaque) && upperbound >= P_FIRSTDATAKEY(topaque);
@@ -2145,6 +2164,9 @@ invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
  * Does the invariant hold that the key is less than or equal to a given upper
  * bound offset item?
  *
+ * Caller should have verified that upperbound's line pointer is consistent
+ * using a PageGetItemIdCareful() call.
+ *
  * If this function returns false, convention is that caller throws error due
  * to corruption.
  */
@@ -2165,6 +2187,9 @@ invariant_leq_offset(BtreeCheckState *state, BTScanInsert key,
  * Does the invariant hold that the key is strictly greater than a given lower
  * bound offset item?
  *
+ * Caller should have verified that lowerbound's line pointer is consistent
+ * using a PageGetItemIdCareful() call.
+ *
 * If this function returns false, convention is that caller throws error due
  * to corruption.
  */
@@ -2199,19 +2224,24 @@ invariant_g_offset(BtreeCheckState *state, BTScanInsert key,
  *
  * Caller's non-target page is a child page of the target, checked as part of
  * checking a property of the target page (i.e. the key comes from the
- * target).
+ * target). Verifies line pointer on behalf of caller.
  *
  * If this function returns false, convention is that caller throws error due
  * to corruption.
  */
 static inline bool
 invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key,
-                             Page nontarget, OffsetNumber upperbound)
+                             BlockNumber nontargetblock, Page nontarget,
+                             OffsetNumber upperbound)
 {
+    ItemId      itemid;
     int32       cmp;
 
     Assert(key->pivotsearch);
 
+    /* Verify line pointer before checking tuple */
+    itemid = PageGetItemIdCareful(state, nontargetblock, nontarget,
+                                  upperbound);
     cmp = _bt_compare(state->rel, key, nontarget, upperbound);
 
     /* pg_upgrade'd indexes may legally have equal sibling tuples */
@@ -2221,14 +2251,12 @@ invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key,
     /* See invariant_l_offset() for an explanation of this extra step */
     if (cmp == 0)
     {
-        ItemId      itemid;
         IndexTuple  child;
         int         uppnkeyatts;
         ItemPointer childheaptid;
         BTPageOpaque copaque;
         bool        nonpivot;
 
-        itemid = PageGetItemId(nontarget, upperbound);
         child = (IndexTuple) PageGetItem(nontarget, itemid);
         copaque = (BTPageOpaque) PageGetSpecialPointer(nontarget);
         nonpivot = P_ISLEAF(copaque) && upperbound >= P_FIRSTDATAKEY(copaque);
@@ -2426,6 +2454,55 @@ bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup)
     return skey;
 }
 
+/*
+ * PageGetItemId() wrapper that validates returned line pointer.
+ *
+ * Buffer page/page item access macros generally trust that line pointers are
+ * not corrupt, which might cause problems for verification itself. For
+ * example, there is no bounds checking in PageGetItem(). Passing it a
+ * corrupt line pointer can cause it to return a tuple/pointer that is unsafe
+ * to dereference.
+ *
+ * Validating line pointers before tuples are accessed avoids undefined
+ * behavior and assertion failures with corrupt indexes, making the
+ * verification process more robust and predictable.
+ */
+static ItemId
+PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page,
+                     OffsetNumber offset)
+{
+    ItemId      itemid = PageGetItemId(page, offset);
+
+    if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) >
+        BLCKSZ - sizeof(BTPageOpaqueData))
+        ereport(ERROR,
+                (errcode(ERRCODE_INDEX_CORRUPTED),
+                 errmsg("line pointer points past end of tuple space in index \"%s\"",
+                        RelationGetRelationName(state->rel)),
+                 errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
+                                    block, offset, ItemIdGetOffset(itemid),
+                                    ItemIdGetLength(itemid),
+                                    ItemIdGetFlags(itemid))));
+
+    /*
+     * Verify that line pointer isn't LP_REDIRECT or LP_UNUSED, since nbtree
+     * never uses either. Verify that line pointer has storage, too, since
+     * even LP_DEAD items should have storage within nbtree.
+     */
+    if (ItemIdIsRedirected(itemid) || !ItemIdIsUsed(itemid) ||
+        ItemIdGetLength(itemid) == 0)
+        ereport(ERROR,
+                (errcode(ERRCODE_INDEX_CORRUPTED),
+                 errmsg("invalid line pointer storage in index \"%s\"",
+                        RelationGetRelationName(state->rel)),
+                 errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
+                                    block, offset, ItemIdGetOffset(itemid),
+                                    ItemIdGetLength(itemid),
+                                    ItemIdGetFlags(itemid))));
+
+    return itemid;
+}
+
 /*
  * BTreeTupleGetHeapTID() wrapper that lets caller enforce that a heap TID must
  * be present in cases where that is mandatory.
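
Note on the new checks (not part of the patch): the sketch below is a minimal, hypothetical distillation of the two conditions PageGetItemIdCareful() enforces before any tuple is fetched through a line pointer, written against the same PostgreSQL headers verify_nbtree.c already includes. The helper name lp_looks_sane() is invented for illustration; PageGetItemId(), the ItemId accessor macros, BLCKSZ, and BTPageOpaqueData are the real server APIs.

    #include "postgres.h"

    #include "access/nbtree.h"
    #include "storage/bufpage.h"

    /*
     * Illustrative only: would this line pointer pass the checks that
     * PageGetItemIdCareful() performs on an nbtree page?
     */
    static bool
    lp_looks_sane(Page page, OffsetNumber offset)
    {
        ItemId      itemid = PageGetItemId(page, offset);

        /* Item must end before the btree special space at the end of the page */
        if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) >
            BLCKSZ - sizeof(BTPageOpaqueData))
            return false;

        /*
         * nbtree never creates LP_REDIRECT or LP_UNUSED line pointers, and
         * even LP_DEAD items keep their storage, so lp_len must be non-zero.
         */
        if (ItemIdIsRedirected(itemid) || !ItemIdIsUsed(itemid) ||
            ItemIdGetLength(itemid) == 0)
            return false;

        return true;
    }

The patch itself raises ERRCODE_INDEX_CORRUPTED errors rather than returning a boolean, so a failing check reports the block, offset, and raw lp_off/lp_len/lp_flags values to the user.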