Use FLEXIBLE_ARRAY_MEMBER for HeapTupleHeaderData.t_bits[].
This requires changing quite a few places that were depending on
sizeof(HeapTupleHeaderData), but it seems for the best.

Michael Paquier, some adjustments by me
parent 3d9b6f31ee
commit e1a11d9311
19 changed files with 102 additions and 99 deletions
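The crux of the change, for readers skimming the diff below: with a true flexible array member, sizeof() of the struct is no longer a stand-in for "size of the fixed header", because it excludes the trailing array entirely and may still carry alignment padding; the header overhead has to be computed as offsetof() of the array member instead, which is what the new SizeofHeapTupleHeader and SizeofMinimalTupleHeader macros do. A minimal standalone sketch of the pattern (simplified field layout and hypothetical Demo* names, not the actual PostgreSQL declarations):

#include <stddef.h>
#include <stdio.h>

/*
 * Simplified stand-in for HeapTupleHeaderData: a couple of fixed fields
 * followed by a flexible array member, as t_bits now is.  (Hypothetical
 * Demo* names; the real struct has many more fields.)
 */
typedef struct DemoTupleHeader
{
    unsigned short t_infomask;  /* flag bits */
    unsigned char  t_hoff;      /* offset to user data */
    unsigned char  t_bits[];    /* null bitmap; C99 flexible array member,
                                 * the form FLEXIBLE_ARRAY_MEMBER normally
                                 * expands to */
} DemoTupleHeader;

/* header overhead, mirroring SizeofHeapTupleHeader in the diff */
#define SizeofDemoTupleHeader offsetof(DemoTupleHeader, t_bits)

int
main(void)
{
    /*
     * sizeof() ignores the flexible array but can still include trailing
     * padding, so it is not a safe measure of the header size; offsetof()
     * of the array member is.
     */
    printf("sizeof(DemoTupleHeader) = %zu\n", sizeof(DemoTupleHeader));
    printf("SizeofDemoTupleHeader   = %zu\n", (size_t) SizeofDemoTupleHeader);
    return 0;
}

On a typical build this sketch prints sizeof = 4 but offsetof = 3, which is exactly the kind of discrepancy the call sites in this commit stop relying on.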
@@ -932,7 +932,7 @@ estimate_size(PlannerInfo *root, RelOptInfo *baserel,
 int tuple_width;
 
 tuple_width = MAXALIGN(baserel->width) +
-MAXALIGN(sizeof(HeapTupleHeaderData));
+MAXALIGN(SizeofHeapTupleHeader);
 ntuples = clamp_row_est((double) stat_buf.st_size /
 (double) tuple_width);
 }
@@ -149,7 +149,7 @@ heap_page_items(PG_FUNCTION_ARGS)
 * many other ways, but at least we won't crash.
 */
 if (ItemIdHasStorage(id) &&
-lp_len >= sizeof(HeapTupleHeader) &&
+lp_len >= MinHeapTupleSize &&
 lp_offset == MAXALIGN(lp_offset) &&
 lp_offset + lp_len <= raw_page_size)
 {
@@ -169,18 +169,19 @@ heap_page_items(PG_FUNCTION_ARGS)
 values[10] = UInt8GetDatum(tuphdr->t_hoff);
 
 /*
-* We already checked that the item as is completely within the
-* raw page passed to us, with the length given in the line
-* pointer.. Let's check that t_hoff doesn't point over lp_len,
-* before using it to access t_bits and oid.
+* We already checked that the item is completely within the raw
+* page passed to us, with the length given in the line pointer.
+* Let's check that t_hoff doesn't point over lp_len, before using
+* it to access t_bits and oid.
 */
-if (tuphdr->t_hoff >= sizeof(HeapTupleHeader) &&
-tuphdr->t_hoff <= lp_len)
+if (tuphdr->t_hoff >= SizeofHeapTupleHeader &&
+tuphdr->t_hoff <= lp_len &&
+tuphdr->t_hoff == MAXALIGN(tuphdr->t_hoff))
 {
 if (tuphdr->t_infomask & HEAP_HASNULL)
 {
 bits_len = tuphdr->t_hoff -
-(((char *) tuphdr->t_bits) -((char *) tuphdr));
+offsetof(HeapTupleHeaderData, t_bits);
 
 values[11] = CStringGetTextDatum(
 bits_to_text(tuphdr->t_bits, bits_len * 8));
@@ -519,7 +519,7 @@ postgresGetForeignRelSize(PlannerInfo *root,
 {
 baserel->pages = 10;
 baserel->tuples =
-(10 * BLCKSZ) / (baserel->width + sizeof(HeapTupleHeaderData));
+(10 * BLCKSZ) / (baserel->width + MAXALIGN(SizeofHeapTupleHeader));
 }
 
 /* Estimate baserel size as best we can with local statistics. */
@@ -1434,7 +1434,7 @@ heap_form_minimal_tuple(TupleDesc tupleDescriptor,
 /*
 * Determine total space needed
 */
-len = offsetof(MinimalTupleData, t_bits);
+len = SizeofMinimalTupleHeader;
 
 if (hasnull)
 len += BITMAPLEN(numberOfAttributes);
@@ -2186,8 +2186,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
 XLogRegisterBufData(0,
-(char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits),
-heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits));
+(char *) heaptup->t_data + SizeofHeapTupleHeader,
+heaptup->t_len - SizeofHeapTupleHeader);
 
 recptr = XLogInsert(RM_HEAP_ID, info);
 
@@ -2460,9 +2460,9 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
 tuphdr->t_hoff = heaptup->t_data->t_hoff;
 
 /* write bitmap [+ padding] [+ oid] + data */
-datalen = heaptup->t_len - offsetof(HeapTupleHeaderData, t_bits);
+datalen = heaptup->t_len - SizeofHeapTupleHeader;
 memcpy(scratchptr,
-(char *) heaptup->t_data + offsetof(HeapTupleHeaderData, t_bits),
+(char *) heaptup->t_data + SizeofHeapTupleHeader,
 datalen);
 tuphdr->datalen = datalen;
 scratchptr += datalen;
@@ -2904,9 +2904,9 @@ l1:
 
 XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
 XLogRegisterData((char *) old_key_tuple->t_data
-+ offsetof(HeapTupleHeaderData, t_bits),
++ SizeofHeapTupleHeader,
 old_key_tuple->t_len
-- offsetof(HeapTupleHeaderData, t_bits));
+- SizeofHeapTupleHeader);
 }
 
 recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
@@ -6732,7 +6732,7 @@ log_heap_update(Relation reln, Buffer oldbuf,
 xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
 xlhdr.t_infomask = newtup->t_data->t_infomask;
 xlhdr.t_hoff = newtup->t_data->t_hoff;
-Assert(offsetof(HeapTupleHeaderData, t_bits) + prefixlen + suffixlen <= newtup->t_len);
+Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
 
 /*
 * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
@@ -6743,8 +6743,8 @@ log_heap_update(Relation reln, Buffer oldbuf,
 if (prefixlen == 0)
 {
 XLogRegisterBufData(0,
-((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits),
-newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) -suffixlen);
+((char *) newtup->t_data) + SizeofHeapTupleHeader,
+newtup->t_len - SizeofHeapTupleHeader - suffixlen);
 }
 else
 {
@@ -6753,11 +6753,11 @@ log_heap_update(Relation reln, Buffer oldbuf,
 * two separate rdata entries.
 */
 /* bitmap [+ padding] [+ oid] */
-if (newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits) >0)
+if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
 {
 XLogRegisterBufData(0,
-((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits),
-newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits));
+((char *) newtup->t_data) + SizeofHeapTupleHeader,
+newtup->t_data->t_hoff - SizeofHeapTupleHeader);
 }
 
 /* data after common prefix */
@@ -6777,8 +6777,8 @@ log_heap_update(Relation reln, Buffer oldbuf,
 XLogRegisterData((char *) &xlhdr_idx, SizeOfHeapHeader);
 
 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
-XLogRegisterData((char *) old_key_tuple->t_data + offsetof(HeapTupleHeaderData, t_bits),
-old_key_tuple->t_len - offsetof(HeapTupleHeaderData, t_bits));
+XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
+old_key_tuple->t_len - SizeofHeapTupleHeader);
 }
 
 recptr = XLogInsert(RM_HEAP_ID, info);
@@ -7351,7 +7351,7 @@ heap_xlog_insert(XLogReaderState *record)
 xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
 Buffer buffer;
 Page page;
-struct
+union
 {
 HeapTupleHeaderData hdr;
 char data[MaxHeapTupleSize];
@@ -7415,12 +7415,12 @@ heap_xlog_insert(XLogReaderState *record)
 data += SizeOfHeapHeader;
 
 htup = &tbuf.hdr;
-MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
+MemSet((char *) htup, 0, SizeofHeapTupleHeader);
 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
-memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
+memcpy((char *) htup + SizeofHeapTupleHeader,
 data,
 newlen);
-newlen += offsetof(HeapTupleHeaderData, t_bits);
+newlen += SizeofHeapTupleHeader;
 htup->t_infomask2 = xlhdr.t_infomask2;
 htup->t_infomask = xlhdr.t_infomask;
 htup->t_hoff = xlhdr.t_hoff;
@@ -7469,7 +7469,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
 BlockNumber blkno;
 Buffer buffer;
 Page page;
-struct
+union
 {
 HeapTupleHeaderData hdr;
 char data[MaxHeapTupleSize];
@@ -7548,14 +7548,14 @@ heap_xlog_multi_insert(XLogReaderState *record)
 newlen = xlhdr->datalen;
 Assert(newlen <= MaxHeapTupleSize);
 htup = &tbuf.hdr;
-MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
+MemSet((char *) htup, 0, SizeofHeapTupleHeader);
 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
-memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
+memcpy((char *) htup + SizeofHeapTupleHeader,
 (char *) tupdata,
 newlen);
 tupdata += newlen;
 
-newlen += offsetof(HeapTupleHeaderData, t_bits);
+newlen += SizeofHeapTupleHeader;
 htup->t_infomask2 = xlhdr->t_infomask2;
 htup->t_infomask = xlhdr->t_infomask;
 htup->t_hoff = xlhdr->t_hoff;
@@ -7618,7 +7618,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 uint16 prefixlen = 0,
 suffixlen = 0;
 char *newp;
-struct
+union
 {
 HeapTupleHeaderData hdr;
 char data[MaxHeapTupleSize];
@@ -7780,19 +7780,19 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 Assert(tuplen <= MaxHeapTupleSize);
 
 htup = &tbuf.hdr;
-MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
+MemSet((char *) htup, 0, SizeofHeapTupleHeader);
 
 /*
 * Reconstruct the new tuple using the prefix and/or suffix from the
 * old tuple, and the data stored in the WAL record.
 */
-newp = (char *) htup + offsetof(HeapTupleHeaderData, t_bits);
+newp = (char *) htup + SizeofHeapTupleHeader;
 if (prefixlen > 0)
 {
 int len;
 
 /* copy bitmap [+ padding] [+ oid] from WAL record */
-len = xlhdr.t_hoff - offsetof(HeapTupleHeaderData, t_bits);
+len = xlhdr.t_hoff - SizeofHeapTupleHeader;
 memcpy(newp, recdata, len);
 recdata += len;
 newp += len;
@@ -7802,7 +7802,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 newp += prefixlen;
 
 /* copy new tuple data from WAL record */
-len = tuplen - (xlhdr.t_hoff - offsetof(HeapTupleHeaderData, t_bits));
+len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
 memcpy(newp, recdata, len);
 recdata += len;
 newp += len;
@@ -7823,7 +7823,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
 if (suffixlen > 0)
 memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
 
-newlen = offsetof(HeapTupleHeaderData, t_bits) + tuplen + prefixlen + suffixlen;
+newlen = SizeofHeapTupleHeader + tuplen + prefixlen + suffixlen;
 htup->t_infomask2 = xlhdr.t_infomask2;
 htup->t_infomask = xlhdr.t_infomask;
 htup->t_hoff = xlhdr.t_hoff;
@@ -677,7 +677,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
 */
 
 /* compute header overhead --- this should match heap_form_tuple() */
-hoff = offsetof(HeapTupleHeaderData, t_bits);
+hoff = SizeofHeapTupleHeader;
 if (has_nulls)
 hoff += BITMAPLEN(numAttrs);
 if (newtup->t_data->t_infomask & HEAP_HASOID)
@@ -963,7 +963,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
 * different conclusion about the size of the null bitmap, or even
 * whether there needs to be one at all.
 */
-new_header_len = offsetof(HeapTupleHeaderData, t_bits);
+new_header_len = SizeofHeapTupleHeader;
 if (has_nulls)
 new_header_len += BITMAPLEN(numAttrs);
 if (olddata->t_infomask & HEAP_HASOID)
@@ -986,7 +986,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
 /*
 * Copy the existing tuple header, but adjust natts and t_hoff.
 */
-memcpy(new_data, olddata, offsetof(HeapTupleHeaderData, t_bits));
+memcpy(new_data, olddata, SizeofHeapTupleHeader);
 HeapTupleHeaderSetNatts(new_data, numAttrs);
 new_data->t_hoff = new_header_len;
 if (olddata->t_infomask & HEAP_HASOID)
@@ -1196,7 +1196,7 @@ toast_flatten_tuple_to_datum(HeapTupleHeader tup,
 *
 * This should match the reconstruction code in toast_insert_or_update.
 */
-new_header_len = offsetof(HeapTupleHeaderData, t_bits);
+new_header_len = SizeofHeapTupleHeader;
 if (has_nulls)
 new_header_len += BITMAPLEN(numAttrs);
 if (tup->t_infomask & HEAP_HASOID)
@@ -1211,7 +1211,7 @@ toast_flatten_tuple_to_datum(HeapTupleHeader tup,
 /*
 * Copy the existing tuple header, but adjust natts and t_hoff.
 */
-memcpy(new_data, tup, offsetof(HeapTupleHeaderData, t_bits));
+memcpy(new_data, tup, SizeofHeapTupleHeader);
 HeapTupleHeaderSetNatts(new_data, numAttrs);
 new_data->t_hoff = new_header_len;
 if (tup->t_infomask & HEAP_HASOID)
@@ -447,7 +447,7 @@ needs_toast_table(Relation rel)
 return false; /* nothing to toast? */
 if (maxlength_unknown)
 return true; /* any unlimited-length attrs? */
-tuple_length = MAXALIGN(offsetof(HeapTupleHeaderData, t_bits) +
+tuple_length = MAXALIGN(SizeofHeapTupleHeader +
 BITMAPLEN(tupdesc->natts)) +
 MAXALIGN(data_length);
 return (tuple_length > TOAST_TUPLE_THRESHOLD);
@@ -439,7 +439,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 * don't count palloc overhead either.
 */
 tupsize = HJTUPLE_OVERHEAD +
-MAXALIGN(sizeof(MinimalTupleData)) +
+MAXALIGN(SizeofMinimalTupleHeader) +
 MAXALIGN(tupwidth);
 inner_rel_bytes = ntuples * tupsize;
 
@@ -4036,11 +4036,11 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
 
 /*
 * If we have a whole-row reference, estimate its width as the sum of
-* per-column widths plus sizeof(HeapTupleHeaderData).
+* per-column widths plus heap tuple header overhead.
 */
 if (have_wholerow_var)
 {
-int32 wholerow_width = sizeof(HeapTupleHeaderData);
+int32 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
 
 if (reloid != InvalidOid)
 {
@@ -4078,7 +4078,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
 static double
 relation_byte_size(double tuples, int width)
 {
-return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
+return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
 }
 
 /*
@@ -2755,7 +2755,7 @@ choose_hashed_grouping(PlannerInfo *root,
 */
 
 /* Estimate per-hash-entry space at tuple width... */
-hashentrysize = MAXALIGN(path_width) + MAXALIGN(sizeof(MinimalTupleData));
+hashentrysize = MAXALIGN(path_width) + MAXALIGN(SizeofMinimalTupleHeader);
 /* plus space for pass-by-ref transition values... */
 hashentrysize += agg_costs->transitionSpace;
 /* plus the per-hash-entry overhead */
@@ -2923,7 +2923,7 @@ choose_hashed_distinct(PlannerInfo *root,
 */
 
 /* Estimate per-hash-entry space at tuple width... */
-hashentrysize = MAXALIGN(path_width) + MAXALIGN(sizeof(MinimalTupleData));
+hashentrysize = MAXALIGN(path_width) + MAXALIGN(SizeofMinimalTupleHeader);
 /* plus the per-hash-entry overhead */
 hashentrysize += hash_agg_entry_size(0);
 
@@ -974,12 +974,12 @@ subplan_is_hashable(Plan *plan)
 
 /*
 * The estimated size of the subquery result must fit in work_mem. (Note:
-* we use sizeof(HeapTupleHeaderData) here even though the tuples will
-* actually be stored as MinimalTuples; this provides some fudge factor
-* for hashtable overhead.)
+* we use heap tuple overhead here even though the tuples will actually be
+* stored as MinimalTuples; this provides some fudge factor for hashtable
+* overhead.)
 */
 subquery_size = plan->plan_rows *
-(MAXALIGN(plan->plan_width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
+(MAXALIGN(plan->plan_width) + MAXALIGN(SizeofHeapTupleHeader));
 if (subquery_size > work_mem * 1024L)
 return false;
 
@@ -832,7 +832,7 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
 * Don't do it if it doesn't look like the hashtable will fit into
 * work_mem.
 */
-hashentrysize = MAXALIGN(input_plan->plan_width) + MAXALIGN(sizeof(MinimalTupleData));
+hashentrysize = MAXALIGN(input_plan->plan_width) + MAXALIGN(SizeofMinimalTupleHeader);
 
 if (hashentrysize * dNumGroups > work_mem * 1024L)
 return false;
@@ -508,7 +508,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
 int32 tuple_width;
 
 tuple_width = get_rel_data_width(rel, attr_widths);
-tuple_width += sizeof(HeapTupleHeaderData);
+tuple_width += MAXALIGN(SizeofHeapTupleHeader);
 tuple_width += sizeof(ItemIdData);
 /* note: integer division is intentional here */
 density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
@@ -765,21 +765,19 @@ DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 * transactions.
 */
 tuple->tuple.t_tableOid = InvalidOid;
-tuple->tuple.t_data = &tuple->header;
-tuple->tuple.t_len = datalen
-+ offsetof(HeapTupleHeaderData, t_bits);
+tuple->tuple.t_data = &tuple->t_data.header;
+tuple->tuple.t_len = datalen + SizeofHeapTupleHeader;
 
-memset(&tuple->header, 0, sizeof(HeapTupleHeaderData));
+memset(&tuple->t_data.header, 0, SizeofHeapTupleHeader);
 
-memcpy((char *) &tuple->header
-+ offsetof(HeapTupleHeaderData, t_bits),
+memcpy((char *) &tuple->t_data.header + SizeofHeapTupleHeader,
 (char *) data,
 datalen);
 data += datalen;
 
-tuple->header.t_infomask = xlhdr->t_infomask;
-tuple->header.t_infomask2 = xlhdr->t_infomask2;
-tuple->header.t_hoff = xlhdr->t_hoff;
+tuple->t_data.header.t_infomask = xlhdr->t_infomask;
+tuple->t_data.header.t_infomask2 = xlhdr->t_infomask2;
+tuple->t_data.header.t_hoff = xlhdr->t_hoff;
 }
 
 /*
@@ -815,27 +813,27 @@ DecodeXLogTuple(char *data, Size len, ReorderBufferTupleBuf *tuple)
 Assert(datalen >= 0);
 Assert(datalen <= MaxHeapTupleSize);
 
-tuple->tuple.t_len = datalen + offsetof(HeapTupleHeaderData, t_bits);
+tuple->tuple.t_len = datalen + SizeofHeapTupleHeader;
 
 /* not a disk based tuple */
 ItemPointerSetInvalid(&tuple->tuple.t_self);
 
 /* we can only figure this out after reassembling the transactions */
 tuple->tuple.t_tableOid = InvalidOid;
-tuple->tuple.t_data = &tuple->header;
+tuple->tuple.t_data = &tuple->t_data.header;
 
 /* data is not stored aligned, copy to aligned storage */
 memcpy((char *) &xlhdr,
 data,
 SizeOfHeapHeader);
 
-memset(&tuple->header, 0, sizeof(HeapTupleHeaderData));
+memset(&tuple->t_data.header, 0, SizeofHeapTupleHeader);
 
-memcpy((char *) &tuple->header + offsetof(HeapTupleHeaderData, t_bits),
+memcpy((char *) &tuple->t_data.header + SizeofHeapTupleHeader,
 data + SizeOfHeapHeader,
 datalen);
 
-tuple->header.t_infomask = xlhdr.t_infomask;
-tuple->header.t_infomask2 = xlhdr.t_infomask2;
-tuple->header.t_hoff = xlhdr.t_hoff;
+tuple->t_data.header.t_infomask = xlhdr.t_infomask;
+tuple->t_data.header.t_infomask2 = xlhdr.t_infomask2;
+tuple->t_data.header.t_hoff = xlhdr.t_hoff;
 }
@@ -2014,14 +2014,12 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
 newtup = change->data.tp.newtuple;
 
 if (oldtup)
-oldlen = offsetof(ReorderBufferTupleBuf, data)
-+oldtup->tuple.t_len
-- offsetof(HeapTupleHeaderData, t_bits);
+oldlen = offsetof(ReorderBufferTupleBuf, t_data) +
+oldtup->tuple.t_len;
 
 if (newtup)
-newlen = offsetof(ReorderBufferTupleBuf, data)
-+newtup->tuple.t_len
-- offsetof(HeapTupleHeaderData, t_bits);
+newlen = offsetof(ReorderBufferTupleBuf, t_data) +
+newtup->tuple.t_len;
 
 sz += oldlen;
 sz += newlen;
@@ -2262,27 +2260,25 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
 case REORDER_BUFFER_CHANGE_DELETE:
 if (change->data.tp.newtuple)
 {
-Size len = offsetof(ReorderBufferTupleBuf, data)
-+((ReorderBufferTupleBuf *) data)->tuple.t_len
-- offsetof(HeapTupleHeaderData, t_bits);
+Size len = offsetof(ReorderBufferTupleBuf, t_data) +
+((ReorderBufferTupleBuf *) data)->tuple.t_len;
 
 change->data.tp.newtuple = ReorderBufferGetTupleBuf(rb);
 memcpy(change->data.tp.newtuple, data, len);
 change->data.tp.newtuple->tuple.t_data =
-&change->data.tp.newtuple->header;
+&change->data.tp.newtuple->t_data.header;
 data += len;
 }
 
 if (change->data.tp.oldtuple)
 {
-Size len = offsetof(ReorderBufferTupleBuf, data)
-+((ReorderBufferTupleBuf *) data)->tuple.t_len
-- offsetof(HeapTupleHeaderData, t_bits);
+Size len = offsetof(ReorderBufferTupleBuf, t_data) +
+((ReorderBufferTupleBuf *) data)->tuple.t_len;
 
 change->data.tp.oldtuple = ReorderBufferGetTupleBuf(rb);
 memcpy(change->data.tp.oldtuple, data, len);
 change->data.tp.oldtuple->tuple.t_data =
-&change->data.tp.oldtuple->header;
+&change->data.tp.oldtuple->t_data.header;
 data += len;
 }
 break;
@@ -2660,7 +2656,7 @@ ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn,
 */
 tmphtup = heap_form_tuple(desc, attrs, isnull);
 Assert(newtup->tuple.t_len <= MaxHeapTupleSize);
-Assert(&newtup->header == newtup->tuple.t_data);
+Assert(&newtup->t_data.header == newtup->tuple.t_data);
 
 memcpy(newtup->tuple.t_data, tmphtup->t_data, tmphtup->t_len);
 newtup->tuple.t_len = tmphtup->t_len;
@@ -84,9 +84,9 @@ suppress_redundant_updates_trigger(PG_FUNCTION_ARGS)
 HeapTupleHeaderGetNatts(oldheader)) &&
 ((newheader->t_infomask & ~HEAP_XACT_MASK) ==
 (oldheader->t_infomask & ~HEAP_XACT_MASK)) &&
-memcmp(((char *) newheader) + offsetof(HeapTupleHeaderData, t_bits),
-((char *) oldheader) + offsetof(HeapTupleHeaderData, t_bits),
-newtuple->t_len - offsetof(HeapTupleHeaderData, t_bits)) == 0)
+memcmp(((char *) newheader) + SizeofHeapTupleHeader,
+((char *) oldheader) + SizeofHeapTupleHeader,
+newtuple->t_len - SizeofHeapTupleHeader) == 0)
 {
 /* ... then suppress the update */
 rettuple = NULL;
@@ -150,13 +150,15 @@ struct HeapTupleHeaderData
 
 /* ^ - 23 bytes - ^ */
 
-bits8 t_bits[1]; /* bitmap of NULLs -- VARIABLE LENGTH */
+bits8 t_bits[FLEXIBLE_ARRAY_MEMBER]; /* bitmap of NULLs */
 
 /* MORE DATA FOLLOWS AT END OF STRUCT */
 };
 
 /* typedef appears in tupbasics.h */
 
+#define SizeofHeapTupleHeader offsetof(HeapTupleHeaderData, t_bits)
+
 /*
 * information stored in t_infomask:
 */
@@ -498,7 +500,7 @@ do { \
 * you can, say, fit 2 tuples of size MaxHeapTupleSize/2 on the same page.
 */
 #define MaxHeapTupleSize (BLCKSZ - MAXALIGN(SizeOfPageHeaderData + sizeof(ItemIdData)))
-#define MinHeapTupleSize MAXALIGN(offsetof(HeapTupleHeaderData, t_bits))
+#define MinHeapTupleSize MAXALIGN(SizeofHeapTupleHeader)
 
 /*
 * MaxHeapTuplesPerPage is an upper bound on the number of tuples that can
@@ -513,7 +515,7 @@ do { \
 */
 #define MaxHeapTuplesPerPage \
 ((int) ((BLCKSZ - SizeOfPageHeaderData) / \
-(MAXALIGN(offsetof(HeapTupleHeaderData, t_bits)) + sizeof(ItemIdData))))
+(MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))))
 
 /*
 * MaxAttrSize is a somewhat arbitrary upper limit on the declared size of
@@ -579,13 +581,15 @@ struct MinimalTupleData
 
 /* ^ - 23 bytes - ^ */
 
-bits8 t_bits[1]; /* bitmap of NULLs -- VARIABLE LENGTH */
+bits8 t_bits[FLEXIBLE_ARRAY_MEMBER]; /* bitmap of NULLs */
 
 /* MORE DATA FOLLOWS AT END OF STRUCT */
 };
 
 /* typedef appears in htup.h */
 
+#define SizeofMinimalTupleHeader offsetof(MinimalTupleData, t_bits)
+
 
 /*
 * GETSTRUCT - given a HeapTuple pointer, return address of the user data
@@ -90,7 +90,7 @@
 
 #define TOAST_MAX_CHUNK_SIZE \
 (EXTERN_TUPLE_MAX_SIZE - \
-MAXALIGN(offsetof(HeapTupleHeaderData, t_bits)) - \
+MAXALIGN(SizeofHeapTupleHeader) - \
 sizeof(Oid) - \
 sizeof(int32) - \
 VARHDRSZ)
@@ -28,8 +28,12 @@ typedef struct ReorderBufferTupleBuf
 
 /* tuple, stored sequentially */
 HeapTupleData tuple;
-HeapTupleHeaderData header;
-char data[MaxHeapTupleSize];
+union
+{
+HeapTupleHeaderData header;
+char data[MaxHeapTupleSize];
+double align_it; /* ensure t_data is MAXALIGN'd */
+} t_data;
 } ReorderBufferTupleBuf;
 
 /*
@@ -77,7 +81,7 @@ typedef struct ReorderBufferChange
 RelFileNode relnode;
 
 /* no previously reassembled toast chunks are necessary anymore */
-bool clear_toast_afterwards;
+bool clear_toast_afterwards;
 
 /* valid for DELETE || UPDATE */
 ReorderBufferTupleBuf *oldtuple;