pgindent run for 9.5
This commit is contained in:
parent
225892552b
commit
807b9e0dff
414 changed files with 5810 additions and 5308 deletions
|
@ -113,12 +113,12 @@ gin_btree_compare_prefix(FunctionCallInfo fcinfo)
|
|||
cmp;
|
||||
|
||||
cmp = DatumGetInt32(DirectFunctionCall2Coll(
|
||||
data->typecmp,
|
||||
PG_GET_COLLATION(),
|
||||
(data->strategy == BTLessStrategyNumber ||
|
||||
data->strategy == BTLessEqualStrategyNumber)
|
||||
? data->datum : a,
|
||||
b));
|
||||
data->typecmp,
|
||||
PG_GET_COLLATION(),
|
||||
(data->strategy == BTLessStrategyNumber ||
|
||||
data->strategy == BTLessEqualStrategyNumber)
|
||||
? data->datum : a,
|
||||
b));
|
||||
|
||||
switch (data->strategy)
|
||||
{
|
||||
|
@ -186,14 +186,14 @@ Datum \
|
|||
gin_extract_value_##type(PG_FUNCTION_ARGS) \
|
||||
{ \
|
||||
return gin_btree_extract_value(fcinfo, is_varlena); \
|
||||
} \
|
||||
} \
|
||||
PG_FUNCTION_INFO_V1(gin_extract_query_##type); \
|
||||
Datum \
|
||||
gin_extract_query_##type(PG_FUNCTION_ARGS) \
|
||||
{ \
|
||||
return gin_btree_extract_query(fcinfo, \
|
||||
is_varlena, leftmostvalue, typecmp); \
|
||||
} \
|
||||
} \
|
||||
PG_FUNCTION_INFO_V1(gin_compare_prefix_##type); \
|
||||
Datum \
|
||||
gin_compare_prefix_##type(PG_FUNCTION_ARGS) \
|
||||
|
@ -209,6 +209,7 @@ leftmostvalue_int2(void)
|
|||
{
|
||||
return Int16GetDatum(SHRT_MIN);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(int2, false, leftmostvalue_int2, btint2cmp)
|
||||
|
||||
static Datum
|
||||
|
@ -216,6 +217,7 @@ leftmostvalue_int4(void)
|
|||
{
|
||||
return Int32GetDatum(INT_MIN);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(int4, false, leftmostvalue_int4, btint4cmp)
|
||||
|
||||
static Datum
|
||||
|
@ -226,6 +228,7 @@ leftmostvalue_int8(void)
|
|||
*/
|
||||
return Int64GetDatum(SEQ_MINVALUE);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(int8, false, leftmostvalue_int8, btint8cmp)
|
||||
|
||||
static Datum
|
||||
|
@ -233,6 +236,7 @@ leftmostvalue_float4(void)
|
|||
{
|
||||
return Float4GetDatum(-get_float4_infinity());
|
||||
}
|
||||
|
||||
GIN_SUPPORT(float4, false, leftmostvalue_float4, btfloat4cmp)
|
||||
|
||||
static Datum
|
||||
|
@ -240,6 +244,7 @@ leftmostvalue_float8(void)
|
|||
{
|
||||
return Float8GetDatum(-get_float8_infinity());
|
||||
}
|
||||
|
||||
GIN_SUPPORT(float8, false, leftmostvalue_float8, btfloat8cmp)
|
||||
|
||||
static Datum
|
||||
|
@ -250,6 +255,7 @@ leftmostvalue_money(void)
|
|||
*/
|
||||
return Int64GetDatum(SEQ_MINVALUE);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(money, false, leftmostvalue_money, cash_cmp)
|
||||
|
||||
static Datum
|
||||
|
@ -257,6 +263,7 @@ leftmostvalue_oid(void)
|
|||
{
|
||||
return ObjectIdGetDatum(0);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(oid, false, leftmostvalue_oid, btoidcmp)
|
||||
|
||||
static Datum
|
||||
|
@ -264,6 +271,7 @@ leftmostvalue_timestamp(void)
|
|||
{
|
||||
return TimestampGetDatum(DT_NOBEGIN);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(timestamp, false, leftmostvalue_timestamp, timestamp_cmp)
|
||||
|
||||
GIN_SUPPORT(timestamptz, false, leftmostvalue_timestamp, timestamp_cmp)
|
||||
|
@ -273,6 +281,7 @@ leftmostvalue_time(void)
|
|||
{
|
||||
return TimeADTGetDatum(0);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(time, false, leftmostvalue_time, time_cmp)
|
||||
|
||||
static Datum
|
||||
|
@ -285,6 +294,7 @@ leftmostvalue_timetz(void)
|
|||
|
||||
return TimeTzADTPGetDatum(v);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(timetz, false, leftmostvalue_timetz, timetz_cmp)
|
||||
|
||||
static Datum
|
||||
|
@ -292,6 +302,7 @@ leftmostvalue_date(void)
|
|||
{
|
||||
return DateADTGetDatum(DATEVAL_NOBEGIN);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(date, false, leftmostvalue_date, date_cmp)
|
||||
|
||||
static Datum
|
||||
|
@ -304,6 +315,7 @@ leftmostvalue_interval(void)
|
|||
v->month = 0;
|
||||
return IntervalPGetDatum(v);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(interval, false, leftmostvalue_interval, interval_cmp)
|
||||
|
||||
static Datum
|
||||
|
@ -313,6 +325,7 @@ leftmostvalue_macaddr(void)
|
|||
|
||||
return MacaddrPGetDatum(v);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(macaddr, false, leftmostvalue_macaddr, macaddr_cmp)
|
||||
|
||||
static Datum
|
||||
|
@ -320,6 +333,7 @@ leftmostvalue_inet(void)
|
|||
{
|
||||
return DirectFunctionCall1(inet_in, CStringGetDatum("0.0.0.0/0"));
|
||||
}
|
||||
|
||||
GIN_SUPPORT(inet, true, leftmostvalue_inet, network_cmp)
|
||||
|
||||
GIN_SUPPORT(cidr, true, leftmostvalue_inet, network_cmp)
|
||||
|
@ -329,6 +343,7 @@ leftmostvalue_text(void)
|
|||
{
|
||||
return PointerGetDatum(cstring_to_text_with_len("", 0));
|
||||
}
|
||||
|
||||
GIN_SUPPORT(text, true, leftmostvalue_text, bttextcmp)
|
||||
|
||||
static Datum
|
||||
|
@ -336,6 +351,7 @@ leftmostvalue_char(void)
|
|||
{
|
||||
return CharGetDatum(SCHAR_MIN);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(char, false, leftmostvalue_char, btcharcmp)
|
||||
|
||||
GIN_SUPPORT(bytea, true, leftmostvalue_text, byteacmp)
|
||||
|
@ -348,6 +364,7 @@ leftmostvalue_bit(void)
|
|||
ObjectIdGetDatum(0),
|
||||
Int32GetDatum(-1));
|
||||
}
|
||||
|
||||
GIN_SUPPORT(bit, true, leftmostvalue_bit, bitcmp)
|
||||
|
||||
static Datum
|
||||
|
@ -358,6 +375,7 @@ leftmostvalue_varbit(void)
|
|||
ObjectIdGetDatum(0),
|
||||
Int32GetDatum(-1));
|
||||
}
|
||||
|
||||
GIN_SUPPORT(varbit, true, leftmostvalue_varbit, bitcmp)
|
||||
|
||||
/*
|
||||
|
@ -402,4 +420,5 @@ leftmostvalue_numeric(void)
|
|||
{
|
||||
return PointerGetDatum(NULL);
|
||||
}
|
||||
|
||||
GIN_SUPPORT(numeric, true, leftmostvalue_numeric, gin_numeric_cmp)
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
GISTENTRY *
|
||||
gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo)
|
||||
{
|
||||
GISTENTRY *retval;
|
||||
GISTENTRY *retval;
|
||||
|
||||
if (entry->leafkey)
|
||||
{
|
||||
|
|
|
@ -71,7 +71,7 @@ gbt_var_key_readable(const GBT_VARKEY *k)
|
|||
* Create a leaf-entry to store in the index, from a single Datum.
|
||||
*/
|
||||
static GBT_VARKEY *
|
||||
gbt_var_key_from_datum(const struct varlena *u)
|
||||
gbt_var_key_from_datum(const struct varlena * u)
|
||||
{
|
||||
int32 lowersize = VARSIZE(u);
|
||||
GBT_VARKEY *r;
|
||||
|
|
|
@ -195,7 +195,7 @@ dmetaphone_alt(PG_FUNCTION_ARGS)
|
|||
* in a case like this.
|
||||
*/
|
||||
|
||||
#define META_FREE(x) ((void)true) /* pfree((x)) */
|
||||
#define META_FREE(x) ((void)true) /* pfree((x)) */
|
||||
#else /* not defined DMETAPHONE_MAIN */
|
||||
|
||||
/* use the standard malloc library when not running in PostgreSQL */
|
||||
|
|
|
@ -72,7 +72,7 @@ typedef struct
|
|||
static pg_crc32
|
||||
crc32_sz(char *buf, int size)
|
||||
{
|
||||
pg_crc32 crc;
|
||||
pg_crc32 crc;
|
||||
|
||||
INIT_TRADITIONAL_CRC32(crc);
|
||||
COMP_TRADITIONAL_CRC32(crc, buf, size);
|
||||
|
|
|
@ -9,7 +9,7 @@ PG_MODULE_MAGIC;
|
|||
|
||||
|
||||
PG_FUNCTION_INFO_V1(hstore_to_plperl);
|
||||
Datum hstore_to_plperl(PG_FUNCTION_ARGS);
|
||||
Datum hstore_to_plperl(PG_FUNCTION_ARGS);
|
||||
|
||||
Datum
|
||||
hstore_to_plperl(PG_FUNCTION_ARGS)
|
||||
|
@ -26,10 +26,10 @@ hstore_to_plperl(PG_FUNCTION_ARGS)
|
|||
for (i = 0; i < count; i++)
|
||||
{
|
||||
const char *key;
|
||||
SV *value;
|
||||
SV *value;
|
||||
|
||||
key = pnstrdup(HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
|
||||
value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base,i), HS_VALLEN(entries, i)));
|
||||
value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base, i), HS_VALLEN(entries, i)));
|
||||
|
||||
(void) hv_store(hv, key, strlen(key), value, 0);
|
||||
}
|
||||
|
@ -39,7 +39,7 @@ hstore_to_plperl(PG_FUNCTION_ARGS)
|
|||
|
||||
|
||||
PG_FUNCTION_INFO_V1(plperl_to_hstore);
|
||||
Datum plperl_to_hstore(PG_FUNCTION_ARGS);
|
||||
Datum plperl_to_hstore(PG_FUNCTION_ARGS);
|
||||
|
||||
Datum
|
||||
plperl_to_hstore(PG_FUNCTION_ARGS)
|
||||
|
@ -61,8 +61,8 @@ plperl_to_hstore(PG_FUNCTION_ARGS)
|
|||
i = 0;
|
||||
while ((he = hv_iternext(hv)))
|
||||
{
|
||||
char *key = sv2cstr(HeSVKEY_force(he));
|
||||
SV *value = HeVAL(he);
|
||||
char *key = sv2cstr(HeSVKEY_force(he));
|
||||
SV *value = HeVAL(he);
|
||||
|
||||
pairs[i].key = pstrdup(key);
|
||||
pairs[i].keylen = hstoreCheckKeyLen(strlen(pairs[i].key));
|
||||
|
|
|
@ -8,7 +8,7 @@ PG_MODULE_MAGIC;
|
|||
|
||||
|
||||
PG_FUNCTION_INFO_V1(hstore_to_plpython);
|
||||
Datum hstore_to_plpython(PG_FUNCTION_ARGS);
|
||||
Datum hstore_to_plpython(PG_FUNCTION_ARGS);
|
||||
|
||||
Datum
|
||||
hstore_to_plpython(PG_FUNCTION_ARGS)
|
||||
|
@ -31,9 +31,9 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
|
|||
PyDict_SetItem(dict, key, Py_None);
|
||||
else
|
||||
{
|
||||
PyObject *value;
|
||||
PyObject *value;
|
||||
|
||||
value = PyString_FromStringAndSize(HS_VAL(entries, base,i), HS_VALLEN(entries, i));
|
||||
value = PyString_FromStringAndSize(HS_VAL(entries, base, i), HS_VALLEN(entries, i));
|
||||
PyDict_SetItem(dict, key, value);
|
||||
Py_XDECREF(value);
|
||||
}
|
||||
|
@ -45,7 +45,7 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
|
|||
|
||||
|
||||
PG_FUNCTION_INFO_V1(plpython_to_hstore);
|
||||
Datum plpython_to_hstore(PG_FUNCTION_ARGS);
|
||||
Datum plpython_to_hstore(PG_FUNCTION_ARGS);
|
||||
|
||||
Datum
|
||||
plpython_to_hstore(PG_FUNCTION_ARGS)
|
||||
|
@ -75,9 +75,9 @@ plpython_to_hstore(PG_FUNCTION_ARGS)
|
|||
|
||||
for (i = 0; i < pcount; i++)
|
||||
{
|
||||
PyObject *tuple;
|
||||
PyObject *key;
|
||||
PyObject *value;
|
||||
PyObject *tuple;
|
||||
PyObject *key;
|
||||
PyObject *value;
|
||||
|
||||
tuple = PyList_GetItem(items, i);
|
||||
key = PyTuple_GetItem(tuple, 0);
|
||||
|
|
|
@ -26,13 +26,14 @@
|
|||
unsigned int
|
||||
ltree_crc32_sz(char *buf, int size)
|
||||
{
|
||||
pg_crc32 crc;
|
||||
pg_crc32 crc;
|
||||
char *p = buf;
|
||||
|
||||
INIT_TRADITIONAL_CRC32(crc);
|
||||
while (size > 0)
|
||||
{
|
||||
char c = (char) TOLOWER(*p);
|
||||
char c = (char) TOLOWER(*p);
|
||||
|
||||
COMP_TRADITIONAL_CRC32(crc, &c, 1);
|
||||
size--;
|
||||
p++;
|
||||
|
|
|
@ -7,7 +7,7 @@ PG_MODULE_MAGIC;
|
|||
|
||||
|
||||
PG_FUNCTION_INFO_V1(ltree_to_plpython);
|
||||
Datum ltree_to_plpython(PG_FUNCTION_ARGS);
|
||||
Datum ltree_to_plpython(PG_FUNCTION_ARGS);
|
||||
|
||||
Datum
|
||||
ltree_to_plpython(PG_FUNCTION_ARGS)
|
||||
|
|
|
@ -58,7 +58,7 @@ brin_page_type(PG_FUNCTION_ARGS)
|
|||
{
|
||||
bytea *raw_page = PG_GETARG_BYTEA_P(0);
|
||||
Page page = VARDATA(raw_page);
|
||||
char *type;
|
||||
char *type;
|
||||
|
||||
switch (BrinPageType(page))
|
||||
{
|
||||
|
@ -86,8 +86,8 @@ brin_page_type(PG_FUNCTION_ARGS)
|
|||
static Page
|
||||
verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
|
||||
{
|
||||
Page page;
|
||||
int raw_page_size;
|
||||
Page page;
|
||||
int raw_page_size;
|
||||
|
||||
raw_page_size = VARSIZE(raw_page) - VARHDRSZ;
|
||||
|
||||
|
@ -95,7 +95,7 @@ verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
|
|||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
||||
errmsg("input page too small"),
|
||||
errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
|
||||
errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
|
||||
|
||||
page = VARDATA(raw_page);
|
||||
|
||||
|
@ -153,7 +153,7 @@ brin_page_items(PG_FUNCTION_ARGS)
|
|||
indexRel = index_open(indexRelid, AccessShareLock);
|
||||
|
||||
state = palloc(offsetof(brin_page_state, columns) +
|
||||
sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
|
||||
sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
|
||||
|
||||
state->bdesc = brin_build_desc(indexRel);
|
||||
state->page = page;
|
||||
|
@ -168,10 +168,10 @@ brin_page_items(PG_FUNCTION_ARGS)
|
|||
*/
|
||||
for (attno = 1; attno <= state->bdesc->bd_tupdesc->natts; attno++)
|
||||
{
|
||||
Oid output;
|
||||
bool isVarlena;
|
||||
Oid output;
|
||||
bool isVarlena;
|
||||
BrinOpcInfo *opcinfo;
|
||||
int i;
|
||||
int i;
|
||||
brin_column_state *column;
|
||||
|
||||
opcinfo = state->bdesc->bd_info[attno - 1];
|
||||
|
@ -213,7 +213,7 @@ brin_page_items(PG_FUNCTION_ARGS)
|
|||
*/
|
||||
if (state->dtup == NULL)
|
||||
{
|
||||
BrinTuple *tup;
|
||||
BrinTuple *tup;
|
||||
MemoryContext mctx;
|
||||
ItemId itemId;
|
||||
|
||||
|
@ -225,8 +225,8 @@ brin_page_items(PG_FUNCTION_ARGS)
|
|||
if (ItemIdIsUsed(itemId))
|
||||
{
|
||||
tup = (BrinTuple *) PageGetItem(state->page,
|
||||
PageGetItemId(state->page,
|
||||
state->offset));
|
||||
PageGetItemId(state->page,
|
||||
state->offset));
|
||||
state->dtup = brin_deform_tuple(state->bdesc, tup);
|
||||
state->attno = 1;
|
||||
state->unusedItem = false;
|
||||
|
@ -253,7 +253,7 @@ brin_page_items(PG_FUNCTION_ARGS)
|
|||
}
|
||||
else
|
||||
{
|
||||
int att = state->attno - 1;
|
||||
int att = state->attno - 1;
|
||||
|
||||
values[0] = UInt16GetDatum(state->offset);
|
||||
values[1] = UInt32GetDatum(state->dtup->bt_blkno);
|
||||
|
@ -263,8 +263,8 @@ brin_page_items(PG_FUNCTION_ARGS)
|
|||
values[5] = BoolGetDatum(state->dtup->bt_placeholder);
|
||||
if (!state->dtup->bt_columns[att].bv_allnulls)
|
||||
{
|
||||
BrinValues *bvalues = &state->dtup->bt_columns[att];
|
||||
StringInfoData s;
|
||||
BrinValues *bvalues = &state->dtup->bt_columns[att];
|
||||
StringInfoData s;
|
||||
bool first;
|
||||
int i;
|
||||
|
||||
|
@ -274,7 +274,7 @@ brin_page_items(PG_FUNCTION_ARGS)
|
|||
first = true;
|
||||
for (i = 0; i < state->columns[att]->nstored; i++)
|
||||
{
|
||||
char *val;
|
||||
char *val;
|
||||
|
||||
if (!first)
|
||||
appendStringInfoString(&s, " .. ");
|
||||
|
@ -312,8 +312,8 @@ brin_page_items(PG_FUNCTION_ARGS)
|
|||
}
|
||||
|
||||
/*
|
||||
* If we're beyond the end of the page, set flag to end the function in
|
||||
* the following iteration.
|
||||
* If we're beyond the end of the page, set flag to end the function
|
||||
* in the following iteration.
|
||||
*/
|
||||
if (state->offset > PageGetMaxOffsetNumber(state->page))
|
||||
state->done = true;
|
||||
|
@ -366,8 +366,8 @@ brin_revmap_data(PG_FUNCTION_ARGS)
|
|||
struct
|
||||
{
|
||||
ItemPointerData *tids;
|
||||
int idx;
|
||||
} *state;
|
||||
int idx;
|
||||
} *state;
|
||||
FuncCallContext *fctx;
|
||||
|
||||
if (!superuser())
|
||||
|
|
|
@ -167,7 +167,7 @@ typedef struct gin_leafpage_items_state
|
|||
TupleDesc tupd;
|
||||
GinPostingList *seg;
|
||||
GinPostingList *lastseg;
|
||||
} gin_leafpage_items_state;
|
||||
} gin_leafpage_items_state;
|
||||
|
||||
Datum
|
||||
gin_leafpage_items(PG_FUNCTION_ARGS)
|
||||
|
|
|
@ -40,11 +40,11 @@
|
|||
|
||||
PG_MODULE_MAGIC;
|
||||
|
||||
void _PG_init(void);
|
||||
void _PG_init(void);
|
||||
|
||||
/* Prototypes for functions used with event triggers */
|
||||
Datum pg_audit_ddl_command_end(PG_FUNCTION_ARGS);
|
||||
Datum pg_audit_sql_drop(PG_FUNCTION_ARGS);
|
||||
Datum pg_audit_ddl_command_end(PG_FUNCTION_ARGS);
|
||||
Datum pg_audit_sql_drop(PG_FUNCTION_ARGS);
|
||||
|
||||
PG_FUNCTION_INFO_V1(pg_audit_ddl_command_end);
|
||||
PG_FUNCTION_INFO_V1(pg_audit_sql_drop);
|
||||
|
@ -67,14 +67,14 @@ PG_FUNCTION_INFO_V1(pg_audit_sql_drop);
|
|||
#define LOG_ROLE (1 << 4) /* GRANT/REVOKE, CREATE/ALTER/DROP ROLE */
|
||||
#define LOG_WRITE (1 << 5) /* INSERT, UPDATE, DELETE, TRUNCATE */
|
||||
|
||||
#define LOG_NONE 0 /* nothing */
|
||||
#define LOG_NONE 0 /* nothing */
|
||||
#define LOG_ALL (0xFFFFFFFF) /* All */
|
||||
|
||||
/* GUC variable for pg_audit.log, which defines the classes to log. */
|
||||
char *auditLog = NULL;
|
||||
char *auditLog = NULL;
|
||||
|
||||
/* Bitmap of classes selected */
|
||||
static int auditLogBitmap = LOG_NONE;
|
||||
static int auditLogBitmap = LOG_NONE;
|
||||
|
||||
/*
|
||||
* String constants for log classes - used when processing tokens in the
|
||||
|
@ -97,7 +97,7 @@ static int auditLogBitmap = LOG_NONE;
|
|||
* the query are in pg_catalog. Interactive sessions (eg: psql) can cause
|
||||
* a lot of noise in the logs which might be uninteresting.
|
||||
*/
|
||||
bool auditLogCatalog = true;
|
||||
bool auditLogCatalog = true;
|
||||
|
||||
/*
|
||||
* GUC variable for pg_audit.log_level
|
||||
|
@ -106,8 +106,8 @@ bool auditLogCatalog = true;
|
|||
* at. The default level is LOG, which goes into the server log but does
|
||||
* not go to the client. Set to NOTICE in the regression tests.
|
||||
*/
|
||||
char *auditLogLevelString = NULL;
|
||||
int auditLogLevel = LOG;
|
||||
char *auditLogLevelString = NULL;
|
||||
int auditLogLevel = LOG;
|
||||
|
||||
/*
|
||||
* GUC variable for pg_audit.log_parameter
|
||||
|
@ -115,7 +115,7 @@ int auditLogLevel = LOG;
|
|||
* Administrators can choose if parameters passed into a statement are
|
||||
* included in the audit log.
|
||||
*/
|
||||
bool auditLogParameter = false;
|
||||
bool auditLogParameter = false;
|
||||
|
||||
/*
|
||||
* GUC variable for pg_audit.log_relation
|
||||
|
@ -124,7 +124,7 @@ bool auditLogParameter = false;
|
|||
* in READ/WRITE class queries. By default, SESSION logs include the query but
|
||||
* do not have a log entry for each relation.
|
||||
*/
|
||||
bool auditLogRelation = false;
|
||||
bool auditLogRelation = false;
|
||||
|
||||
/*
|
||||
* GUC variable for pg_audit.log_statement_once
|
||||
|
@ -134,7 +134,7 @@ bool auditLogRelation = false;
|
|||
* the audit log to facilitate searching, but this can cause the log to be
|
||||
* unnecessairly bloated in some environments.
|
||||
*/
|
||||
bool auditLogStatementOnce = false;
|
||||
bool auditLogStatementOnce = false;
|
||||
|
||||
/*
|
||||
* GUC variable for pg_audit.role
|
||||
|
@ -143,7 +143,7 @@ bool auditLogStatementOnce = false;
|
|||
* Object-level auditing uses the privileges which are granted to this role to
|
||||
* determine if a statement should be logged.
|
||||
*/
|
||||
char *auditRole = NULL;
|
||||
char *auditRole = NULL;
|
||||
|
||||
/*
|
||||
* String constants for the audit log fields.
|
||||
|
@ -213,23 +213,23 @@ char *auditRole = NULL;
|
|||
*/
|
||||
typedef struct
|
||||
{
|
||||
int64 statementId; /* Simple counter */
|
||||
int64 substatementId; /* Simple counter */
|
||||
int64 statementId; /* Simple counter */
|
||||
int64 substatementId; /* Simple counter */
|
||||
|
||||
LogStmtLevel logStmtLevel; /* From GetCommandLogLevel when possible, */
|
||||
/* generated when not. */
|
||||
NodeTag commandTag; /* same here */
|
||||
/* generated when not. */
|
||||
NodeTag commandTag; /* same here */
|
||||
const char *command; /* same here */
|
||||
const char *objectType; /* From event trigger when possible */
|
||||
/* generated when not. */
|
||||
char *objectName; /* Fully qualified object identification */
|
||||
/* generated when not. */
|
||||
char *objectName; /* Fully qualified object identification */
|
||||
const char *commandText; /* sourceText / queryString */
|
||||
ParamListInfo paramList; /* QueryDesc/ProcessUtility parameters */
|
||||
|
||||
bool granted; /* Audit role has object permissions? */
|
||||
bool logged; /* Track if we have logged this event, used */
|
||||
/* post-ProcessUtility to make sure we log */
|
||||
bool statementLogged; /* Track if we have logged the statement */
|
||||
bool granted; /* Audit role has object permissions? */
|
||||
bool logged; /* Track if we have logged this event, used */
|
||||
/* post-ProcessUtility to make sure we log */
|
||||
bool statementLogged; /* Track if we have logged the statement */
|
||||
} AuditEvent;
|
||||
|
||||
/*
|
||||
|
@ -239,9 +239,9 @@ typedef struct AuditEventStackItem
|
|||
{
|
||||
struct AuditEventStackItem *next;
|
||||
|
||||
AuditEvent auditEvent;
|
||||
AuditEvent auditEvent;
|
||||
|
||||
int64 stackId;
|
||||
int64 stackId;
|
||||
|
||||
MemoryContext contextAudit;
|
||||
MemoryContextCallback contextCallback;
|
||||
|
@ -288,7 +288,7 @@ stack_free(void *stackFree)
|
|||
while (nextItem != NULL)
|
||||
{
|
||||
/* Check if this item matches the item to be freed */
|
||||
if (nextItem == (AuditEventStackItem *)stackFree)
|
||||
if (nextItem == (AuditEventStackItem *) stackFree)
|
||||
{
|
||||
/* Move top of stack to the item after the freed item */
|
||||
auditEventStack = nextItem->next;
|
||||
|
@ -309,7 +309,8 @@ stack_free(void *stackFree)
|
|||
substatementTotal = 0;
|
||||
|
||||
/*
|
||||
* Reset statement logged so that next statement will be logged.
|
||||
* Reset statement logged so that next statement will be
|
||||
* logged.
|
||||
*/
|
||||
statementLogged = false;
|
||||
}
|
||||
|
@ -356,7 +357,7 @@ stack_push()
|
|||
* the stack at this item.
|
||||
*/
|
||||
stackItem->contextCallback.func = stack_free;
|
||||
stackItem->contextCallback.arg = (void *)stackItem;
|
||||
stackItem->contextCallback.arg = (void *) stackItem;
|
||||
MemoryContextRegisterResetCallback(contextAudit,
|
||||
&stackItem->contextCallback);
|
||||
|
||||
|
@ -431,7 +432,7 @@ append_valid_csv(StringInfoData *buffer, const char *appendStr)
|
|||
|
||||
for (pChar = appendStr; *pChar; pChar++)
|
||||
{
|
||||
if (*pChar == '"') /* double single quotes */
|
||||
if (*pChar == '"') /* double single quotes */
|
||||
appendStringInfoCharMacro(buffer, *pChar);
|
||||
|
||||
appendStringInfoCharMacro(buffer, *pChar);
|
||||
|
@ -461,23 +462,23 @@ static void
|
|||
log_audit_event(AuditEventStackItem *stackItem)
|
||||
{
|
||||
/* By default, put everything in the MISC class. */
|
||||
int class = LOG_MISC;
|
||||
const char *className = CLASS_MISC;
|
||||
MemoryContext contextOld;
|
||||
StringInfoData auditStr;
|
||||
int class = LOG_MISC;
|
||||
const char *className = CLASS_MISC;
|
||||
MemoryContext contextOld;
|
||||
StringInfoData auditStr;
|
||||
|
||||
|
||||
/* Classify the statement using log stmt level and the command tag */
|
||||
switch (stackItem->auditEvent.logStmtLevel)
|
||||
{
|
||||
/* All mods go in WRITE class, execpt EXECUTE */
|
||||
/* All mods go in WRITE class, execpt EXECUTE */
|
||||
case LOGSTMT_MOD:
|
||||
className = CLASS_WRITE;
|
||||
class = LOG_WRITE;
|
||||
|
||||
switch (stackItem->auditEvent.commandTag)
|
||||
{
|
||||
/* Currently, only EXECUTE is different */
|
||||
/* Currently, only EXECUTE is different */
|
||||
case T_ExecuteStmt:
|
||||
className = CLASS_MISC;
|
||||
class = LOG_MISC;
|
||||
|
@ -487,7 +488,7 @@ log_audit_event(AuditEventStackItem *stackItem)
|
|||
}
|
||||
break;
|
||||
|
||||
/* These are DDL, unless they are ROLE */
|
||||
/* These are DDL, unless they are ROLE */
|
||||
case LOGSTMT_DDL:
|
||||
className = CLASS_DDL;
|
||||
class = LOG_DDL;
|
||||
|
@ -495,7 +496,7 @@ log_audit_event(AuditEventStackItem *stackItem)
|
|||
/* Identify role statements */
|
||||
switch (stackItem->auditEvent.commandTag)
|
||||
{
|
||||
/* We know these are all role statements */
|
||||
/* We know these are all role statements */
|
||||
case T_GrantStmt:
|
||||
case T_GrantRoleStmt:
|
||||
case T_CreateRoleStmt:
|
||||
|
@ -505,11 +506,12 @@ log_audit_event(AuditEventStackItem *stackItem)
|
|||
className = CLASS_ROLE;
|
||||
class = LOG_ROLE;
|
||||
break;
|
||||
/*
|
||||
* Rename and Drop are general and therefore we have to do an
|
||||
* additional check against the command string to see if they
|
||||
* are role or regular DDL.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Rename and Drop are general and therefore we have to do
|
||||
* an additional check against the command string to see
|
||||
* if they are role or regular DDL.
|
||||
*/
|
||||
case T_RenameStmt:
|
||||
case T_DropStmt:
|
||||
if (pg_strcasecmp(stackItem->auditEvent.command,
|
||||
|
@ -527,11 +529,11 @@ log_audit_event(AuditEventStackItem *stackItem)
|
|||
}
|
||||
break;
|
||||
|
||||
/* Classify the rest */
|
||||
/* Classify the rest */
|
||||
case LOGSTMT_ALL:
|
||||
switch (stackItem->auditEvent.commandTag)
|
||||
{
|
||||
/* READ statements */
|
||||
/* READ statements */
|
||||
case T_CopyStmt:
|
||||
case T_SelectStmt:
|
||||
case T_PrepareStmt:
|
||||
|
@ -540,7 +542,7 @@ log_audit_event(AuditEventStackItem *stackItem)
|
|||
class = LOG_READ;
|
||||
break;
|
||||
|
||||
/* FUNCTION statements */
|
||||
/* FUNCTION statements */
|
||||
case T_DoStmt:
|
||||
className = CLASS_FUNCTION;
|
||||
class = LOG_FUNCTION;
|
||||
|
@ -558,8 +560,8 @@ log_audit_event(AuditEventStackItem *stackItem)
|
|||
/*
|
||||
* Only log the statement if:
|
||||
*
|
||||
* 1. If object was selected for audit logging (granted)
|
||||
* 2. The statement belongs to a class that is being logged
|
||||
* 1. If object was selected for audit logging (granted) 2. The statement
|
||||
* belongs to a class that is being logged
|
||||
*
|
||||
* If neither of these is true, return.
|
||||
*/
|
||||
|
@ -615,10 +617,10 @@ log_audit_event(AuditEventStackItem *stackItem)
|
|||
/* Handle parameter logging, if enabled. */
|
||||
if (auditLogParameter)
|
||||
{
|
||||
int paramIdx;
|
||||
int numParams;
|
||||
StringInfoData paramStrResult;
|
||||
ParamListInfo paramList = stackItem->auditEvent.paramList;
|
||||
int paramIdx;
|
||||
int numParams;
|
||||
StringInfoData paramStrResult;
|
||||
ParamListInfo paramList = stackItem->auditEvent.paramList;
|
||||
|
||||
numParams = paramList == NULL ? 0 : paramList->numParams;
|
||||
|
||||
|
@ -630,9 +632,9 @@ log_audit_event(AuditEventStackItem *stackItem)
|
|||
paramIdx++)
|
||||
{
|
||||
ParamExternData *prm = ¶mList->params[paramIdx];
|
||||
Oid typeOutput;
|
||||
bool typeIsVarLena;
|
||||
char *paramStr;
|
||||
Oid typeOutput;
|
||||
bool typeIsVarLena;
|
||||
char *paramStr;
|
||||
|
||||
/* Add a comma for each param */
|
||||
if (paramIdx != 0)
|
||||
|
@ -663,7 +665,7 @@ log_audit_event(AuditEventStackItem *stackItem)
|
|||
else
|
||||
/* we were asked to not log it */
|
||||
appendStringInfoString(&auditStr,
|
||||
"<previously logged>,<previously logged>");
|
||||
"<previously logged>,<previously logged>");
|
||||
|
||||
/*
|
||||
* Log the audit entry. Note: use of INT64_FORMAT here is bad for
|
||||
|
@ -696,7 +698,7 @@ audit_on_acl(Datum aclDatum,
|
|||
{
|
||||
bool result = false;
|
||||
Acl *acl;
|
||||
AclItem *aclItemData;
|
||||
AclItem *aclItemData;
|
||||
int aclIndex;
|
||||
int aclTotal;
|
||||
|
||||
|
@ -710,7 +712,7 @@ audit_on_acl(Datum aclDatum,
|
|||
/* Check privileges granted directly to auditOid */
|
||||
for (aclIndex = 0; aclIndex < aclTotal; aclIndex++)
|
||||
{
|
||||
AclItem *aclItem = &aclItemData[aclIndex];
|
||||
AclItem *aclItem = &aclItemData[aclIndex];
|
||||
|
||||
if (aclItem->ai_grantee == auditOid &&
|
||||
aclItem->ai_privs & mask)
|
||||
|
@ -731,7 +733,7 @@ audit_on_acl(Datum aclDatum,
|
|||
{
|
||||
for (aclIndex = 0; aclIndex < aclTotal; aclIndex++)
|
||||
{
|
||||
AclItem *aclItem = &aclItemData[aclIndex];
|
||||
AclItem *aclItem = &aclItemData[aclIndex];
|
||||
|
||||
/* Don't test public or auditOid (it has been tested already) */
|
||||
if (aclItem->ai_grantee == ACL_ID_PUBLIC ||
|
||||
|
@ -838,9 +840,9 @@ audit_on_any_attribute(Oid relOid,
|
|||
Bitmapset *attributeSet,
|
||||
AclMode mode)
|
||||
{
|
||||
bool result = false;
|
||||
AttrNumber col;
|
||||
Bitmapset *tmpSet;
|
||||
bool result = false;
|
||||
AttrNumber col;
|
||||
Bitmapset *tmpSet;
|
||||
|
||||
/* If bms is empty then check for any column match */
|
||||
if (bms_is_empty(attributeSet))
|
||||
|
@ -891,9 +893,9 @@ audit_on_any_attribute(Oid relOid,
|
|||
static void
|
||||
log_select_dml(Oid auditOid, List *rangeTabls)
|
||||
{
|
||||
ListCell *lr;
|
||||
bool first = true;
|
||||
bool found = false;
|
||||
ListCell *lr;
|
||||
bool first = true;
|
||||
bool found = false;
|
||||
|
||||
/* Do not log if this is an internal statement */
|
||||
if (internalStatement)
|
||||
|
@ -901,8 +903,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
|
|||
|
||||
foreach(lr, rangeTabls)
|
||||
{
|
||||
Oid relOid;
|
||||
Relation rel;
|
||||
Oid relOid;
|
||||
Relation rel;
|
||||
RangeTblEntry *rte = lfirst(lr);
|
||||
|
||||
/* We only care about tables, and can ignore subqueries etc. */
|
||||
|
@ -912,8 +914,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
|
|||
found = true;
|
||||
|
||||
/*
|
||||
* If we are not logging all-catalog queries (auditLogCatalog is false)
|
||||
* then filter out any system relations here.
|
||||
* If we are not logging all-catalog queries (auditLogCatalog is
|
||||
* false) then filter out any system relations here.
|
||||
*/
|
||||
relOid = rte->relid;
|
||||
rel = relation_open(relOid, NoLock);
|
||||
|
@ -982,63 +984,72 @@ log_select_dml(Oid auditOid, List *rangeTabls)
|
|||
{
|
||||
case RELKIND_RELATION:
|
||||
auditEventStack->auditEvent.objectType =
|
||||
OBJECT_TYPE_TABLE;
|
||||
OBJECT_TYPE_TABLE;
|
||||
|
||||
break;
|
||||
|
||||
case RELKIND_INDEX:
|
||||
auditEventStack->auditEvent.objectType =
|
||||
OBJECT_TYPE_INDEX;
|
||||
OBJECT_TYPE_INDEX;
|
||||
|
||||
break;
|
||||
|
||||
case RELKIND_SEQUENCE:
|
||||
auditEventStack->auditEvent.objectType =
|
||||
OBJECT_TYPE_SEQUENCE;
|
||||
OBJECT_TYPE_SEQUENCE;
|
||||
|
||||
break;
|
||||
|
||||
case RELKIND_TOASTVALUE:
|
||||
auditEventStack->auditEvent.objectType =
|
||||
OBJECT_TYPE_TOASTVALUE;
|
||||
OBJECT_TYPE_TOASTVALUE;
|
||||
|
||||
break;
|
||||
|
||||
case RELKIND_VIEW:
|
||||
auditEventStack->auditEvent.objectType =
|
||||
OBJECT_TYPE_VIEW;
|
||||
OBJECT_TYPE_VIEW;
|
||||
|
||||
break;
|
||||
|
||||
case RELKIND_COMPOSITE_TYPE:
|
||||
auditEventStack->auditEvent.objectType =
|
||||
OBJECT_TYPE_COMPOSITE_TYPE;
|
||||
OBJECT_TYPE_COMPOSITE_TYPE;
|
||||
|
||||
break;
|
||||
|
||||
case RELKIND_FOREIGN_TABLE:
|
||||
auditEventStack->auditEvent.objectType =
|
||||
OBJECT_TYPE_FOREIGN_TABLE;
|
||||
OBJECT_TYPE_FOREIGN_TABLE;
|
||||
|
||||
break;
|
||||
|
||||
case RELKIND_MATVIEW:
|
||||
auditEventStack->auditEvent.objectType =
|
||||
OBJECT_TYPE_MATVIEW;
|
||||
OBJECT_TYPE_MATVIEW;
|
||||
|
||||
break;
|
||||
|
||||
default:
|
||||
auditEventStack->auditEvent.objectType =
|
||||
OBJECT_TYPE_UNKNOWN;
|
||||
OBJECT_TYPE_UNKNOWN;
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
/* Get a copy of the relation name and assign it to object name */
|
||||
auditEventStack->auditEvent.objectName =
|
||||
quote_qualified_identifier(get_namespace_name(
|
||||
RelationGetNamespace(rel)),
|
||||
RelationGetNamespace(rel)),
|
||||
RelationGetRelationName(rel));
|
||||
relation_close(rel, NoLock);
|
||||
|
||||
/* Perform object auditing only if the audit role is valid */
|
||||
if (auditOid != InvalidOid)
|
||||
{
|
||||
AclMode auditPerms =
|
||||
(ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
|
||||
rte->requiredPerms;
|
||||
AclMode auditPerms =
|
||||
(ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
|
||||
rte->requiredPerms;
|
||||
|
||||
/*
|
||||
* If any of the required permissions for the relation are granted
|
||||
|
@ -1104,8 +1115,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
|
|||
|
||||
/*
|
||||
* If no tables were found that means that RangeTbls was empty or all
|
||||
* relations were in the system schema. In that case still log a
|
||||
* session record.
|
||||
* relations were in the system schema. In that case still log a session
|
||||
* record.
|
||||
*/
|
||||
if (!found)
|
||||
{
|
||||
|
@ -1123,7 +1134,7 @@ log_select_dml(Oid auditOid, List *rangeTabls)
|
|||
static void
|
||||
log_function_execute(Oid objectId)
|
||||
{
|
||||
HeapTuple proctup;
|
||||
HeapTuple proctup;
|
||||
Form_pg_proc proc;
|
||||
AuditEventStackItem *stackItem;
|
||||
|
||||
|
@ -1159,6 +1170,7 @@ log_function_execute(Oid objectId)
|
|||
stackItem->auditEvent.commandTag = T_DoStmt;
|
||||
stackItem->auditEvent.command = COMMAND_EXECUTE;
|
||||
stackItem->auditEvent.objectType = OBJECT_TYPE_FUNCTION;
|
||||
|
||||
stackItem->auditEvent.commandText = stackItem->next->auditEvent.commandText;
|
||||
|
||||
log_audit_event(stackItem);
|
||||
|
@ -1236,9 +1248,9 @@ pg_audit_ExecutorStart_hook(QueryDesc *queryDesc, int eflags)
|
|||
standard_ExecutorStart(queryDesc, eflags);
|
||||
|
||||
/*
|
||||
* Move the stack memory context to the query memory context. This needs to
|
||||
* be done here because the query context does not exist before the call
|
||||
* to standard_ExecutorStart() but the stack item is required by
|
||||
* Move the stack memory context to the query memory context. This needs
|
||||
* to be done here because the query context does not exist before the
|
||||
* call to standard_ExecutorStart() but the stack item is required by
|
||||
* pg_audit_ExecutorCheckPerms_hook() which is called during
|
||||
* standard_ExecutorStart().
|
||||
*/
|
||||
|
@ -1253,7 +1265,7 @@ pg_audit_ExecutorStart_hook(QueryDesc *queryDesc, int eflags)
|
|||
static bool
|
||||
pg_audit_ExecutorCheckPerms_hook(List *rangeTabls, bool abort)
|
||||
{
|
||||
Oid auditOid;
|
||||
Oid auditOid;
|
||||
|
||||
/* Get the audit oid if the role exists */
|
||||
auditOid = get_role_oid(auditRole, true);
|
||||
|
@ -1283,7 +1295,7 @@ pg_audit_ProcessUtility_hook(Node *parsetree,
|
|||
char *completionTag)
|
||||
{
|
||||
AuditEventStackItem *stackItem = NULL;
|
||||
int64 stackId = 0;
|
||||
int64 stackId = 0;
|
||||
|
||||
/*
|
||||
* Don't audit substatements. All the substatements we care about should
|
||||
|
@ -1328,19 +1340,22 @@ pg_audit_ProcessUtility_hook(Node *parsetree,
|
|||
params, dest, completionTag);
|
||||
|
||||
/*
|
||||
* Process the audit event if there is one. Also check that this event was
|
||||
* not popped off the stack by a memory context being free'd elsewhere.
|
||||
* Process the audit event if there is one. Also check that this event
|
||||
* was not popped off the stack by a memory context being free'd
|
||||
* elsewhere.
|
||||
*/
|
||||
if (stackItem && !IsAbortedTransactionBlockState())
|
||||
{
|
||||
/*
|
||||
* Make sure the item we want to log is still on the stack - if not then
|
||||
* something has gone wrong and an error will be raised.
|
||||
* Make sure the item we want to log is still on the stack - if not
|
||||
* then something has gone wrong and an error will be raised.
|
||||
*/
|
||||
stack_valid(stackId);
|
||||
|
||||
/* Log the utility command if logging is on, the command has not already
|
||||
* been logged by another hook, and the transaction is not aborted.
|
||||
/*
|
||||
* Log the utility command if logging is on, the command has not
|
||||
* already been logged by another hook, and the transaction is not
|
||||
* aborted.
|
||||
*/
|
||||
if (auditLogBitmap != 0 && !stackItem->auditEvent.logged)
|
||||
log_audit_event(stackItem);
|
||||
|
@ -1380,11 +1395,12 @@ Datum
|
|||
pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
|
||||
{
|
||||
EventTriggerData *eventData;
|
||||
int result, row;
|
||||
TupleDesc spiTupDesc;
|
||||
const char *query;
|
||||
MemoryContext contextQuery;
|
||||
MemoryContext contextOld;
|
||||
int result,
|
||||
row;
|
||||
TupleDesc spiTupDesc;
|
||||
const char *query;
|
||||
MemoryContext contextQuery;
|
||||
MemoryContext contextOld;
|
||||
|
||||
/* Continue only if session DDL logging is enabled */
|
||||
if (~auditLogBitmap & LOG_DDL)
|
||||
|
@ -1393,7 +1409,7 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
|
|||
/* Be sure the module was loaded */
|
||||
if (!auditEventStack)
|
||||
elog(ERROR, "pg_audit not loaded before call to "
|
||||
"pg_audit_ddl_command_end()");
|
||||
"pg_audit_ddl_command_end()");
|
||||
|
||||
/* This is an internal statement - do not log it */
|
||||
internalStatement = true;
|
||||
|
@ -1404,11 +1420,11 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
|
|||
|
||||
/* Switch memory context for query */
|
||||
contextQuery = AllocSetContextCreate(
|
||||
CurrentMemoryContext,
|
||||
"pg_audit_func_ddl_command_end temporary context",
|
||||
ALLOCSET_DEFAULT_MINSIZE,
|
||||
ALLOCSET_DEFAULT_INITSIZE,
|
||||
ALLOCSET_DEFAULT_MAXSIZE);
|
||||
CurrentMemoryContext,
|
||||
"pg_audit_func_ddl_command_end temporary context",
|
||||
ALLOCSET_DEFAULT_MINSIZE,
|
||||
ALLOCSET_DEFAULT_INITSIZE,
|
||||
ALLOCSET_DEFAULT_MAXSIZE);
|
||||
contextOld = MemoryContextSwitchTo(contextQuery);
|
||||
|
||||
/* Get information about triggered events */
|
||||
|
@ -1423,31 +1439,32 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
|
|||
|
||||
/* Return objects affected by the (non drop) DDL statement */
|
||||
query = "SELECT UPPER(object_type), object_identity\n"
|
||||
" FROM pg_event_trigger_ddl_commands()";
|
||||
" FROM pg_event_trigger_ddl_commands()";
|
||||
|
||||
/* Attempt to connect */
|
||||
result = SPI_connect();
|
||||
if (result < 0)
|
||||
elog(ERROR, "pg_audit_ddl_command_end: SPI_connect returned %d",
|
||||
result);
|
||||
result);
|
||||
|
||||
/* Execute the query */
|
||||
result = SPI_execute(query, true, 0);
|
||||
if (result != SPI_OK_SELECT)
|
||||
elog(ERROR, "pg_audit_ddl_command_end: SPI_execute returned %d",
|
||||
result);
|
||||
result);
|
||||
|
||||
/* Iterate returned rows */
|
||||
spiTupDesc = SPI_tuptable->tupdesc;
|
||||
for (row = 0; row < SPI_processed; row++)
|
||||
{
|
||||
HeapTuple spiTuple;
|
||||
HeapTuple spiTuple;
|
||||
|
||||
spiTuple = SPI_tuptable->vals[row];
|
||||
|
||||
/* Supply object name and type for audit event */
|
||||
auditEventStack->auditEvent.objectType =
|
||||
SPI_getvalue(spiTuple, spiTupDesc, 1);
|
||||
SPI_getvalue(spiTuple, spiTupDesc, 1);
|
||||
|
||||
auditEventStack->auditEvent.objectName =
|
||||
SPI_getvalue(spiTuple, spiTupDesc, 2);
|
||||
|
||||
|
@ -1473,11 +1490,12 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
|
|||
Datum
|
||||
pg_audit_sql_drop(PG_FUNCTION_ARGS)
|
||||
{
|
||||
int result, row;
|
||||
TupleDesc spiTupDesc;
|
||||
const char *query;
|
||||
MemoryContext contextQuery;
|
||||
MemoryContext contextOld;
|
||||
int result,
|
||||
row;
|
||||
TupleDesc spiTupDesc;
|
||||
const char *query;
|
||||
MemoryContext contextQuery;
|
||||
MemoryContext contextOld;
|
||||
|
||||
if (~auditLogBitmap & LOG_DDL)
|
||||
PG_RETURN_NULL();
|
||||
|
@ -1485,7 +1503,7 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
|
|||
/* Be sure the module was loaded */
|
||||
if (!auditEventStack)
|
||||
elog(ERROR, "pg_audit not loaded before call to "
|
||||
"pg_audit_sql_drop()");
|
||||
"pg_audit_sql_drop()");
|
||||
|
||||
/* This is an internal statement - do not log it */
|
||||
internalStatement = true;
|
||||
|
@ -1496,44 +1514,45 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
|
|||
|
||||
/* Switch memory context for the query */
|
||||
contextQuery = AllocSetContextCreate(
|
||||
CurrentMemoryContext,
|
||||
"pg_audit_func_ddl_command_end temporary context",
|
||||
ALLOCSET_DEFAULT_MINSIZE,
|
||||
ALLOCSET_DEFAULT_INITSIZE,
|
||||
ALLOCSET_DEFAULT_MAXSIZE);
|
||||
CurrentMemoryContext,
|
||||
"pg_audit_func_ddl_command_end temporary context",
|
||||
ALLOCSET_DEFAULT_MINSIZE,
|
||||
ALLOCSET_DEFAULT_INITSIZE,
|
||||
ALLOCSET_DEFAULT_MAXSIZE);
|
||||
contextOld = MemoryContextSwitchTo(contextQuery);
|
||||
|
||||
/* Return objects affected by the drop statement */
|
||||
query = "SELECT UPPER(object_type),\n"
|
||||
" object_identity\n"
|
||||
" FROM pg_event_trigger_dropped_objects()\n"
|
||||
" WHERE lower(object_type) <> 'type'\n"
|
||||
" AND schema_name <> 'pg_toast'";
|
||||
" object_identity\n"
|
||||
" FROM pg_event_trigger_dropped_objects()\n"
|
||||
" WHERE lower(object_type) <> 'type'\n"
|
||||
" AND schema_name <> 'pg_toast'";
|
||||
|
||||
/* Attempt to connect */
|
||||
result = SPI_connect();
|
||||
if (result < 0)
|
||||
elog(ERROR, "pg_audit_ddl_drop: SPI_connect returned %d",
|
||||
result);
|
||||
result);
|
||||
|
||||
/* Execute the query */
|
||||
result = SPI_execute(query, true, 0);
|
||||
if (result != SPI_OK_SELECT)
|
||||
elog(ERROR, "pg_audit_ddl_drop: SPI_execute returned %d",
|
||||
result);
|
||||
result);
|
||||
|
||||
/* Iterate returned rows */
|
||||
spiTupDesc = SPI_tuptable->tupdesc;
|
||||
for (row = 0; row < SPI_processed; row++)
|
||||
{
|
||||
HeapTuple spiTuple;
|
||||
HeapTuple spiTuple;
|
||||
|
||||
spiTuple = SPI_tuptable->vals[row];
|
||||
|
||||
auditEventStack->auditEvent.objectType =
|
||||
SPI_getvalue(spiTuple, spiTupDesc, 1);
|
||||
SPI_getvalue(spiTuple, spiTupDesc, 1);
|
||||
|
||||
auditEventStack->auditEvent.objectName =
|
||||
SPI_getvalue(spiTuple, spiTupDesc, 2);
|
||||
SPI_getvalue(spiTuple, spiTupDesc, 2);
|
||||
|
||||
log_audit_event(auditEventStack);
|
||||
}
|
||||
|
@ -1562,10 +1581,10 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
|
|||
static bool
|
||||
check_pg_audit_log(char **newVal, void **extra, GucSource source)
|
||||
{
|
||||
List *flagRawList;
|
||||
char *rawVal;
|
||||
ListCell *lt;
|
||||
int *flags;
|
||||
List *flagRawList;
|
||||
char *rawVal;
|
||||
ListCell *lt;
|
||||
int *flags;
|
||||
|
||||
/* Make sure newval is a comma-separated list of tokens. */
|
||||
rawVal = pstrdup(*newVal);
|
||||
|
@ -1581,18 +1600,18 @@ check_pg_audit_log(char **newVal, void **extra, GucSource source)
|
|||
* Check that we recognise each token, and add it to the bitmap we're
|
||||
* building up in a newly-allocated int *f.
|
||||
*/
|
||||
if (!(flags = (int *)malloc(sizeof(int))))
|
||||
if (!(flags = (int *) malloc(sizeof(int))))
|
||||
return false;
|
||||
|
||||
*flags = 0;
|
||||
|
||||
foreach(lt, flagRawList)
|
||||
{
|
||||
bool subtract = false;
|
||||
int class;
|
||||
bool subtract = false;
|
||||
int class;
|
||||
|
||||
/* Retrieve a token */
|
||||
char *token = (char *)lfirst(lt);
|
||||
char *token = (char *) lfirst(lt);
|
||||
|
||||
/* If token is preceded by -, then the token is subtractive */
|
||||
if (strstr(token, "-") == token)
|
||||
|
@ -1651,7 +1670,7 @@ static void
|
|||
assign_pg_audit_log(const char *newVal, void *extra)
|
||||
{
|
||||
if (extra)
|
||||
auditLogBitmap = *(int *)extra;
|
||||
auditLogBitmap = *(int *) extra;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1662,10 +1681,10 @@ assign_pg_audit_log(const char *newVal, void *extra)
|
|||
static bool
|
||||
check_pg_audit_log_level(char **newVal, void **extra, GucSource source)
|
||||
{
|
||||
int *logLevel;
|
||||
int *logLevel;
|
||||
|
||||
/* Allocate memory to store the log level */
|
||||
if (!(logLevel = (int *)malloc(sizeof(int))))
|
||||
if (!(logLevel = (int *) malloc(sizeof(int))))
|
||||
return false;
|
||||
|
||||
/* Find the log level enum */
|
||||
|
@ -1718,7 +1737,7 @@ static void
|
|||
assign_pg_audit_log_level(const char *newVal, void *extra)
|
||||
{
|
||||
if (extra)
|
||||
auditLogLevel = *(int *)extra;
|
||||
auditLogLevel = *(int *) extra;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1729,126 +1748,126 @@ _PG_init(void)
|
|||
{
|
||||
/* Define pg_audit.log */
|
||||
DefineCustomStringVariable(
|
||||
"pg_audit.log",
|
||||
"pg_audit.log",
|
||||
|
||||
"Specifies which classes of statements will be logged by session audit "
|
||||
"logging. Multiple classes can be provided using a comma-separated "
|
||||
"list and classes can be subtracted by prefacing the class with a "
|
||||
"- sign.",
|
||||
"Specifies which classes of statements will be logged by session audit "
|
||||
"logging. Multiple classes can be provided using a comma-separated "
|
||||
"list and classes can be subtracted by prefacing the class with a "
|
||||
"- sign.",
|
||||
|
||||
NULL,
|
||||
&auditLog,
|
||||
"none",
|
||||
PGC_SUSET,
|
||||
GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
|
||||
check_pg_audit_log,
|
||||
assign_pg_audit_log,
|
||||
NULL);
|
||||
NULL,
|
||||
&auditLog,
|
||||
"none",
|
||||
PGC_SUSET,
|
||||
GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
|
||||
check_pg_audit_log,
|
||||
assign_pg_audit_log,
|
||||
NULL);
|
||||
|
||||
/* Define pg_audit.log_catalog */
|
||||
DefineCustomBoolVariable(
|
||||
"pg_audit.log_catalog",
|
||||
"pg_audit.log_catalog",
|
||||
|
||||
"Specifies that session logging should be enabled in the case where "
|
||||
"all relations in a statement are in pg_catalog. Disabling this "
|
||||
"setting will reduce noise in the log from tools like psql and PgAdmin "
|
||||
"that query the catalog heavily.",
|
||||
"all relations in a statement are in pg_catalog. Disabling this "
|
||||
"setting will reduce noise in the log from tools like psql and PgAdmin "
|
||||
"that query the catalog heavily.",
|
||||
|
||||
NULL,
|
||||
&auditLogCatalog,
|
||||
true,
|
||||
PGC_SUSET,
|
||||
GUC_NOT_IN_SAMPLE,
|
||||
NULL, NULL, NULL);
|
||||
NULL,
|
||||
&auditLogCatalog,
|
||||
true,
|
||||
PGC_SUSET,
|
||||
GUC_NOT_IN_SAMPLE,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
/* Define pg_audit.log_level */
|
||||
DefineCustomStringVariable(
|
||||
"pg_audit.log_level",
|
||||
"pg_audit.log_level",
|
||||
|
||||
"Specifies the log level that will be used for log entries. This "
|
||||
"setting is used for regression testing and may also be useful to end "
|
||||
"users for testing or other purposes. It is not intended to be used "
|
||||
"in a production environment as it may leak which statements are being "
|
||||
"logged to the user.",
|
||||
"Specifies the log level that will be used for log entries. This "
|
||||
"setting is used for regression testing and may also be useful to end "
|
||||
"users for testing or other purposes. It is not intended to be used "
|
||||
"in a production environment as it may leak which statements are being "
|
||||
"logged to the user.",
|
||||
|
||||
NULL,
|
||||
&auditLogLevelString,
|
||||
"log",
|
||||
PGC_SUSET,
|
||||
GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
|
||||
check_pg_audit_log_level,
|
||||
assign_pg_audit_log_level,
|
||||
NULL);
|
||||
NULL,
|
||||
&auditLogLevelString,
|
||||
"log",
|
||||
PGC_SUSET,
|
||||
GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
|
||||
check_pg_audit_log_level,
|
||||
assign_pg_audit_log_level,
|
||||
NULL);
|
||||
|
||||
/* Define pg_audit.log_parameter */
|
||||
DefineCustomBoolVariable(
|
||||
"pg_audit.log_parameter",
|
||||
"pg_audit.log_parameter",
|
||||
|
||||
"Specifies that audit logging should include the parameters that were "
|
||||
"passed with the statement. When parameters are present they will be "
|
||||
"be included in CSV format after the statement text.",
|
||||
"Specifies that audit logging should include the parameters that were "
|
||||
"passed with the statement. When parameters are present they will be "
|
||||
"be included in CSV format after the statement text.",
|
||||
|
||||
NULL,
|
||||
&auditLogParameter,
|
||||
false,
|
||||
PGC_SUSET,
|
||||
GUC_NOT_IN_SAMPLE,
|
||||
NULL, NULL, NULL);
|
||||
NULL,
|
||||
&auditLogParameter,
|
||||
false,
|
||||
PGC_SUSET,
|
||||
GUC_NOT_IN_SAMPLE,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
/* Define pg_audit.log_relation */
|
||||
DefineCustomBoolVariable(
|
||||
"pg_audit.log_relation",
|
||||
"pg_audit.log_relation",
|
||||
|
||||
"Specifies whether session audit logging should create a separate log "
|
||||
"entry for each relation referenced in a SELECT or DML statement. "
|
||||
"This is a useful shortcut for exhaustive logging without using object "
|
||||
"audit logging.",
|
||||
"Specifies whether session audit logging should create a separate log "
|
||||
"entry for each relation referenced in a SELECT or DML statement. "
|
||||
"This is a useful shortcut for exhaustive logging without using object "
|
||||
"audit logging.",
|
||||
|
||||
NULL,
|
||||
&auditLogRelation,
|
||||
false,
|
||||
PGC_SUSET,
|
||||
GUC_NOT_IN_SAMPLE,
|
||||
NULL, NULL, NULL);
|
||||
NULL,
|
||||
&auditLogRelation,
|
||||
false,
|
||||
PGC_SUSET,
|
||||
GUC_NOT_IN_SAMPLE,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
/* Define pg_audit.log_statement_once */
|
||||
DefineCustomBoolVariable(
|
||||
"pg_audit.log_statement_once",
|
||||
"pg_audit.log_statement_once",
|
||||
|
||||
"Specifies whether logging will include the statement text and "
|
||||
"parameters with the first log entry for a statement/substatement "
|
||||
"combination or with every entry. Disabling this setting will result "
|
||||
"in less verbose logging but may make it more difficult to determine "
|
||||
"the statement that generated a log entry, though the "
|
||||
"statement/substatement pair along with the process id should suffice "
|
||||
"to identify the statement text logged with a previous entry.",
|
||||
"Specifies whether logging will include the statement text and "
|
||||
"parameters with the first log entry for a statement/substatement "
|
||||
"combination or with every entry. Disabling this setting will result "
|
||||
"in less verbose logging but may make it more difficult to determine "
|
||||
"the statement that generated a log entry, though the "
|
||||
"statement/substatement pair along with the process id should suffice "
|
||||
"to identify the statement text logged with a previous entry.",
|
||||
|
||||
NULL,
|
||||
&auditLogStatementOnce,
|
||||
false,
|
||||
PGC_SUSET,
|
||||
GUC_NOT_IN_SAMPLE,
|
||||
NULL, NULL, NULL);
|
||||
NULL,
|
||||
&auditLogStatementOnce,
|
||||
false,
|
||||
PGC_SUSET,
|
||||
GUC_NOT_IN_SAMPLE,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
/* Define pg_audit.role */
|
||||
DefineCustomStringVariable(
|
||||
"pg_audit.role",
|
||||
"pg_audit.role",
|
||||
|
||||
"Specifies the master role to use for object audit logging. Muliple "
|
||||
"audit roles can be defined by granting them to the master role. This "
|
||||
"allows multiple groups to be in charge of different aspects of audit "
|
||||
"logging.",
|
||||
"Specifies the master role to use for object audit logging. Muliple "
|
||||
"audit roles can be defined by granting them to the master role. This "
|
||||
"allows multiple groups to be in charge of different aspects of audit "
|
||||
"logging.",
|
||||
|
||||
NULL,
|
||||
&auditRole,
|
||||
"",
|
||||
PGC_SUSET,
|
||||
GUC_NOT_IN_SAMPLE,
|
||||
NULL, NULL, NULL);
|
||||
NULL,
|
||||
&auditRole,
|
||||
"",
|
||||
PGC_SUSET,
|
||||
GUC_NOT_IN_SAMPLE,
|
||||
NULL, NULL, NULL);
|
||||
|
||||
/*
|
||||
* Install our hook functions after saving the existing pointers to preserve
|
||||
* the chains.
|
||||
* Install our hook functions after saving the existing pointers to
|
||||
* preserve the chains.
|
||||
*/
|
||||
next_ExecutorStart_hook = ExecutorStart_hook;
|
||||
ExecutorStart_hook = pg_audit_ExecutorStart_hook;
|
||||
|
|
|
@ -34,6 +34,7 @@ typedef struct
|
|||
bool isvalid;
|
||||
bool isdirty;
|
||||
uint16 usagecount;
|
||||
|
||||
/*
|
||||
* An int32 is sufficiently large, as MAX_BACKENDS prevents a buffer from
|
||||
* being pinned by too many backends and each backend will only pin once
|
||||
|
|
|
@ -138,10 +138,10 @@ typedef struct Counters
|
|||
{
|
||||
int64 calls; /* # of times executed */
|
||||
double total_time; /* total execution time, in msec */
|
||||
double min_time; /* minimim execution time in msec */
|
||||
double max_time; /* maximum execution time in msec */
|
||||
double mean_time; /* mean execution time in msec */
|
||||
double sum_var_time; /* sum of variances in execution time in msec */
|
||||
double min_time; /* minimim execution time in msec */
|
||||
double max_time; /* maximum execution time in msec */
|
||||
double mean_time; /* mean execution time in msec */
|
||||
double sum_var_time; /* sum of variances in execution time in msec */
|
||||
int64 rows; /* total # of retrieved or affected rows */
|
||||
int64 shared_blks_hit; /* # of shared buffer hits */
|
||||
int64 shared_blks_read; /* # of shared disk blocks read */
|
||||
|
@ -1231,10 +1231,10 @@ pgss_store(const char *query, uint32 queryId,
|
|||
else
|
||||
{
|
||||
/*
|
||||
* Welford's method for accurately computing variance.
|
||||
* See <http://www.johndcook.com/blog/standard_deviation/>
|
||||
* Welford's method for accurately computing variance. See
|
||||
* <http://www.johndcook.com/blog/standard_deviation/>
|
||||
*/
|
||||
double old_mean = e->counters.mean_time;
|
||||
double old_mean = e->counters.mean_time;
|
||||
|
||||
e->counters.mean_time +=
|
||||
(total_time - old_mean) / e->counters.calls;
|
||||
|
@ -1572,10 +1572,11 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
|
|||
values[i++] = Float8GetDatumFast(tmp.min_time);
|
||||
values[i++] = Float8GetDatumFast(tmp.max_time);
|
||||
values[i++] = Float8GetDatumFast(tmp.mean_time);
|
||||
|
||||
/*
|
||||
* Note we are calculating the population variance here, not the
|
||||
* sample variance, as we have data for the whole population,
|
||||
* so Bessel's correction is not used, and we don't divide by
|
||||
* sample variance, as we have data for the whole population, so
|
||||
* Bessel's correction is not used, and we don't divide by
|
||||
* tmp.calls - 1.
|
||||
*/
|
||||
if (tmp.calls > 1)
|
||||
|
@ -2687,16 +2688,16 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
|
|||
break;
|
||||
case T_OnConflictExpr:
|
||||
{
|
||||
OnConflictExpr *conf = (OnConflictExpr *) node;
|
||||
OnConflictExpr *conf = (OnConflictExpr *) node;
|
||||
|
||||
APP_JUMB(conf->action);
|
||||
JumbleExpr(jstate, (Node *) conf->arbiterElems);
|
||||
JumbleExpr(jstate, conf->arbiterWhere);
|
||||
JumbleExpr(jstate, (Node *) conf->onConflictSet);
|
||||
JumbleExpr(jstate, (Node *) conf->onConflictSet);
|
||||
JumbleExpr(jstate, conf->onConflictWhere);
|
||||
APP_JUMB(conf->constraint);
|
||||
APP_JUMB(conf->exclRelIndex);
|
||||
JumbleExpr(jstate, (Node *) conf->exclRelTlist);
|
||||
JumbleExpr(jstate, (Node *) conf->exclRelTlist);
|
||||
}
|
||||
break;
|
||||
case T_List:
|
||||
|
|
|
@ -399,7 +399,7 @@ pgp_extract_armor_headers(const uint8 *src, unsigned len,
|
|||
char *line;
|
||||
char *nextline;
|
||||
char *eol,
|
||||
*colon;
|
||||
*colon;
|
||||
int hlen;
|
||||
char *buf;
|
||||
int hdrlines;
|
||||
|
|
|
@ -259,6 +259,7 @@ set_arg(PGP_Context *ctx, char *key, char *val,
|
|||
res = pgp_set_convert_crlf(ctx, atoi(val));
|
||||
else if (strcmp(key, "unicode-mode") == 0)
|
||||
res = pgp_set_unicode_mode(ctx, atoi(val));
|
||||
|
||||
/*
|
||||
* The remaining options are for debugging/testing and are therefore not
|
||||
* documented in the user-facing docs.
|
||||
|
@ -834,22 +835,22 @@ static int
|
|||
parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
|
||||
char ***p_keys, char ***p_values)
|
||||
{
|
||||
int nkdims = ARR_NDIM(key_array);
|
||||
int nvdims = ARR_NDIM(val_array);
|
||||
char **keys,
|
||||
**values;
|
||||
Datum *key_datums,
|
||||
*val_datums;
|
||||
bool *key_nulls,
|
||||
*val_nulls;
|
||||
int key_count,
|
||||
val_count;
|
||||
int i;
|
||||
int nkdims = ARR_NDIM(key_array);
|
||||
int nvdims = ARR_NDIM(val_array);
|
||||
char **keys,
|
||||
**values;
|
||||
Datum *key_datums,
|
||||
*val_datums;
|
||||
bool *key_nulls,
|
||||
*val_nulls;
|
||||
int key_count,
|
||||
val_count;
|
||||
int i;
|
||||
|
||||
if (nkdims > 1 || nkdims != nvdims)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
|
||||
errmsg("wrong number of array subscripts")));
|
||||
errmsg("wrong number of array subscripts")));
|
||||
if (nkdims == 0)
|
||||
return 0;
|
||||
|
||||
|
@ -871,7 +872,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
|
|||
|
||||
for (i = 0; i < key_count; i++)
|
||||
{
|
||||
char *v;
|
||||
char *v;
|
||||
|
||||
/* Check that the key doesn't contain anything funny */
|
||||
if (key_nulls[i])
|
||||
|
@ -884,7 +885,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
|
|||
if (!string_is_ascii(v))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
||||
errmsg("header key must not contain non-ASCII characters")));
|
||||
errmsg("header key must not contain non-ASCII characters")));
|
||||
if (strstr(v, ": "))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
||||
|
@ -906,7 +907,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
|
|||
if (!string_is_ascii(v))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
||||
errmsg("header value must not contain non-ASCII characters")));
|
||||
errmsg("header value must not contain non-ASCII characters")));
|
||||
if (strchr(v, '\n'))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
||||
|
@ -1045,7 +1046,7 @@ pgp_armor_headers(PG_FUNCTION_ARGS)
|
|||
SRF_RETURN_DONE(funcctx);
|
||||
else
|
||||
{
|
||||
char *values[2];
|
||||
char *values[2];
|
||||
|
||||
/* we assume that the keys (and values) are in UTF-8. */
|
||||
utf8key = state->keys[funcctx->call_cntr];
|
||||
|
|
|
@ -278,11 +278,11 @@ void pgp_cfb_free(PGP_CFB *ctx);
int pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
int pgp_cfb_decrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);

void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
int num_headers, char **keys, char **values);
int pgp_armor_decode(const uint8 *src, int len, StringInfo dst);
int pgp_extract_armor_headers(const uint8 *src, unsigned len,
int *nheaders, char ***keys, char ***values);

int pgp_compress_filter(PushFilter **res, PGP_Context *ctx, PushFilter *dst);
int pgp_decompress_filter(PullFilter **res, PGP_Context *ctx, PullFilter *src);

@ -84,8 +84,8 @@ statapprox_heap(Relation rel, output_type *stat)
CHECK_FOR_INTERRUPTS();

/*
* If the page has only visible tuples, then we can find out the free
* space from the FSM and move on.
*/
if (visibilitymap_test(rel, blkno, &vmbuffer))
{

@ -103,8 +103,8 @@ statapprox_heap(Relation rel, output_type *stat)
page = BufferGetPage(buf);

/*
* It's not safe to call PageGetHeapFreeSpace() on new pages, so we
* treat them as being free space for our purposes.
*/
if (!PageIsNew(page))
stat->free_space += PageGetHeapFreeSpace(page);

@ -120,9 +120,9 @@ statapprox_heap(Relation rel, output_type *stat)
scanned++;

/*
* Look at each tuple on the page and decide whether it's live or
* dead, then count it and its size. Unlike lazy_scan_heap, we can
* afford to ignore problems and special cases.
*/
maxoff = PageGetMaxOffsetNumber(page);

@ -179,9 +179,10 @@ statapprox_heap(Relation rel, output_type *stat)
UnlockReleaseBuffer(buf);
}

stat->table_len = (uint64) nblocks *BLCKSZ;

stat->tuple_count = vac_estimate_reltuples(rel, false, nblocks, scanned,
stat->tuple_count + misc_count);

/*
* Calculate percentages if the relation has one or more pages.

@ -240,9 +241,9 @@ pgstattuple_approx(PG_FUNCTION_ARGS)
errmsg("cannot access temporary tables of other sessions")));

/*
* We support only ordinary relations and materialised views, because we
* depend on the visibility map and free space map for our estimates about
* unscanned pages.
*/
if (!(rel->rd_rel->relkind == RELKIND_RELATION ||
rel->rd_rel->relkind == RELKIND_MATVIEW))

@ -268,6 +269,6 @@ pgstattuple_approx(PG_FUNCTION_ARGS)
values[i++] = Int64GetDatum(stat.free_space);
values[i++] = Float8GetDatum(stat.free_percent);

ret = heap_form_tuple(tupdesc, values, nulls);
return HeapTupleGetDatum(ret);
}

@ -203,7 +203,7 @@ typedef struct PgFdwAnalyzeState
/* for random sampling */
double samplerows; /* # of rows fetched */
double rowstoskip; /* # of rows to skip before next sample */
ReservoirStateData rstate; /* state for reservoir sampling */

/* working memory contexts */
MemoryContext anl_cxt; /* context for per-analyze lifespan data */

@ -53,16 +53,16 @@ static void pg_decode_shutdown(LogicalDecodingContext *ctx);
static void pg_decode_begin_txn(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn);
static void pg_output_begin(LogicalDecodingContext *ctx,
TestDecodingData *data,
ReorderBufferTXN *txn,
bool last_write);
static void pg_decode_commit_txn(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
static void pg_decode_change(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, Relation rel,
ReorderBufferChange *change);
static bool pg_decode_filter(LogicalDecodingContext *ctx,
RepOriginId origin_id);

void
_PG_init(void)

@ -33,14 +33,14 @@ PG_MODULE_MAGIC;
typedef struct
{
SamplerRandomState randstate;
uint32 seed; /* random seed */
BlockNumber nblocks; /* number of block in relation */
int32 ntuples; /* number of tuples to return */
int32 donetuples; /* tuples already returned */
OffsetNumber lt; /* last tuple returned from current block */
BlockNumber step; /* step size */
BlockNumber lb; /* last block visited */
BlockNumber doneblocks; /* number of already returned blocks */
} SystemSamplerData;

@ -60,11 +60,11 @@ static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
Datum
tsm_system_rows_init(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
uint32 seed = PG_GETARG_UINT32(1);
int32 ntuples = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
HeapScanDesc scan = tsdesc->heapScan;
SystemSamplerData *sampler;

if (ntuples < 1)
ereport(ERROR,

@ -86,6 +86,7 @@ tsm_system_rows_init(PG_FUNCTION_ARGS)

/* Find relative prime as step size for linear probing. */
sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);

/*
* Randomize start position so that blocks close to step size don't have
* higher probability of being chosen on very short scan.

@ -106,8 +107,8 @@ tsm_system_rows_init(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_nextblock(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;

sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
sampler->doneblocks++;

@ -127,10 +128,10 @@ tsm_system_rows_nextblock(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_nexttuple(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
OffsetNumber tupoffset = sampler->lt;

if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;

@ -152,9 +153,9 @@ tsm_system_rows_nexttuple(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_examinetuple(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
bool visible = PG_GETARG_BOOL(3);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;

if (!visible)
PG_RETURN_BOOL(false);

@ -183,8 +184,8 @@ tsm_system_rows_end(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_reset(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;

sampler->lt = InvalidOffsetNumber;
sampler->donetuples = 0;

@ -203,14 +204,14 @@ tsm_system_rows_reset(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_cost(PG_FUNCTION_ARGS)
{
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
Path *path = (Path *) PG_GETARG_POINTER(1);
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
List *args = (List *) PG_GETARG_POINTER(3);
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
double *tuples = (double *) PG_GETARG_POINTER(5);
Node *limitnode;
int32 ntuples;

limitnode = linitial(args);
limitnode = estimate_expression_value(root, limitnode);

@ -235,9 +236,9 @@ tsm_system_rows_cost(PG_FUNCTION_ARGS)

static uint32
gcd(uint32 a, uint32 b)
{
uint32 c;

while (a != 0)
{

@ -253,8 +254,8 @@ static uint32
random_relative_prime(uint32 n, SamplerRandomState randstate)
{
/* Pick random starting number, with some limits on what it can be. */
uint32 r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
t;

/*
* This should only take 2 or 3 iterations as the probability of 2 numbers

@ -35,16 +35,17 @@ PG_MODULE_MAGIC;
typedef struct
{
SamplerRandomState randstate;
uint32 seed; /* random seed */
BlockNumber nblocks; /* number of block in relation */
int32 time; /* time limit for sampling */
TimestampTz start_time; /* start time of sampling */
TimestampTz end_time; /* end time of sampling */
OffsetNumber lt; /* last tuple returned from current block */
BlockNumber step; /* step size */
BlockNumber lb; /* last block visited */
BlockNumber estblocks; /* estimated number of returned blocks
* (moving) */
BlockNumber doneblocks; /* number of already returned blocks */
} SystemSamplerData;

@ -63,11 +64,11 @@ static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
Datum
tsm_system_time_init(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
uint32 seed = PG_GETARG_UINT32(1);
int32 time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
HeapScanDesc scan = tsdesc->heapScan;
SystemSamplerData *sampler;

if (time < 1)
ereport(ERROR,

@ -92,6 +93,7 @@ tsm_system_time_init(PG_FUNCTION_ARGS)

/* Find relative prime as step size for linear probing. */
sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);

/*
* Randomize start position so that blocks close to step size don't have
* higher probability of being chosen on very short scan.

@ -111,8 +113,8 @@ tsm_system_time_init(PG_FUNCTION_ARGS)
Datum
tsm_system_time_nextblock(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;

sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
sampler->doneblocks++;

@ -125,16 +127,16 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
* Update the estimations for time limit at least 10 times per estimated
* number of returned blocks to handle variations in block read speed.
*/
if (sampler->doneblocks % Max(sampler->estblocks / 10, 1) == 0)
{
TimestampTz now = GetCurrentTimestamp();
long secs;
int usecs;
int usecs_remaining;
int time_per_block;

TimestampDifference(sampler->start_time, now, &secs, &usecs);
usecs += (int) secs *1000000;

time_per_block = usecs / sampler->doneblocks;

@ -144,7 +146,7 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
PG_RETURN_UINT32(InvalidBlockNumber);

/* Remaining microseconds */
usecs_remaining = usecs + (int) secs *1000000;

/* Recalculate estimated returned number of blocks */
if (time_per_block < usecs_remaining && time_per_block > 0)

@ -161,10 +163,10 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
Datum
tsm_system_time_nexttuple(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
OffsetNumber tupoffset = sampler->lt;

if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;

@ -198,8 +200,8 @@ tsm_system_time_end(PG_FUNCTION_ARGS)
Datum
tsm_system_time_reset(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;

sampler->lt = InvalidOffsetNumber;
sampler->start_time = GetCurrentTimestamp();

@ -221,18 +223,18 @@ tsm_system_time_reset(PG_FUNCTION_ARGS)
Datum
tsm_system_time_cost(PG_FUNCTION_ARGS)
{
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
Path *path = (Path *) PG_GETARG_POINTER(1);
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
List *args = (List *) PG_GETARG_POINTER(3);
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
double *tuples = (double *) PG_GETARG_POINTER(5);
Node *limitnode;
int32 time;
BlockNumber relpages;
double reltuples;
double density;
double spc_random_page_cost;

limitnode = linitial(args);
limitnode = estimate_expression_value(root, limitnode);

@ -269,10 +271,10 @@ tsm_system_time_cost(PG_FUNCTION_ARGS)
/*
* Assumption here is that we'll never read less than 1% of table pages,
* this is here mainly because it is much less bad to overestimate than
* underestimate and using just spc_random_page_cost will probably lead to
* underestimations in general.
*/
*pages = Min(baserel->pages, Max(time / spc_random_page_cost, baserel->pages / 100));
*tuples = rint(density * (double) *pages * path->rows / baserel->tuples);
path->rows = *tuples;

@ -280,9 +282,9 @@ tsm_system_time_cost(PG_FUNCTION_ARGS)
}

static uint32
gcd(uint32 a, uint32 b)
{
uint32 c;

while (a != 0)
{

@ -298,8 +300,8 @@ static uint32
random_relative_prime(uint32 n, SamplerRandomState randstate)
{
/* Pick random starting number, with some limits on what it can be. */
uint32 r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
t;

/*
* This should only take 2 or 3 iterations as the probability of 2 numbers

@ -387,7 +387,7 @@ bringetbitmap(PG_FUNCTION_ARGS)
*/
Assert((key->sk_flags & SK_ISNULL) ||
(key->sk_collation ==
bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));

/* First time this column? look up consistent function */
if (consistentFn[keyattno - 1].fn_oid == InvalidOid)

@ -523,10 +523,10 @@ brinbuildCallback(Relation index,
thisblock = ItemPointerGetBlockNumber(&htup->t_self);

/*
* If we're in a block that belongs to a future range, summarize what
* we've got and start afresh. Note the scan might have skipped many
* pages, if they were devoid of live tuples; make sure to insert index
* tuples for those too.
*/
while (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1)
{

@ -660,7 +660,6 @@ brinbuild(PG_FUNCTION_ARGS)
Datum
brinbuildempty(PG_FUNCTION_ARGS)
{
Relation index = (Relation) PG_GETARG_POINTER(0);
Buffer metabuf;

@ -696,7 +695,7 @@ brinbulkdelete(PG_FUNCTION_ARGS)
{
/* other arguments are not currently used */
IndexBulkDeleteResult *stats =
(IndexBulkDeleteResult *) PG_GETARG_POINTER(1);

/* allocate stats if first time through, else re-use existing struct */
if (stats == NULL)

@ -714,7 +713,7 @@ brinvacuumcleanup(PG_FUNCTION_ARGS)
{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
IndexBulkDeleteResult *stats =
(IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
Relation heapRel;

/* No-op in ANALYZE ONLY mode */

@ -900,7 +899,7 @@ terminate_brin_buildstate(BrinBuildState *state)

page = BufferGetPage(state->bs_currentInsertBuf);
RecordPageWithFreeSpace(state->bs_irel,
BufferGetBlockNumber(state->bs_currentInsertBuf),
PageGetFreeSpace(page));
ReleaseBuffer(state->bs_currentInsertBuf);
}

@ -61,11 +61,11 @@
* 0 - the union of the values in the block range
* 1 - whether an empty value is present in any tuple in the block range
* 2 - whether the values in the block range cannot be merged (e.g. an IPv6
* address amidst IPv4 addresses).
*/
#define INCLUSION_UNION 0
#define INCLUSION_UNMERGEABLE 1
#define INCLUSION_CONTAINS_EMPTY 2

typedef struct InclusionOpaque

@ -294,22 +294,22 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
unionval = column->bv_values[INCLUSION_UNION];
switch (key->sk_strategy)
{
/*
* Placement strategies
*
* These are implemented by logically negating the result of the
* converse placement operator; for this to work, the converse
* operator must be part of the opclass. An error will be thrown
* by inclusion_get_strategy_procinfo() if the required strategy
* is not part of the opclass.
*
* These all return false if either argument is empty, so there is
* no need to check for empty elements.
*/

case RTLeftStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
RTOverRightStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));

@ -333,7 +333,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)

case RTBelowStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
RTOverAboveStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));

@ -351,7 +351,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)

case RTAboveStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
RTOverBelowStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));

@ -381,8 +381,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
* strategies because some elements can be contained even though
* the union is not; instead we use the overlap operator.
*
* We check for empty elements separately as they are not merged
* to the union but contained by everything.
*/

case RTContainedByStrategyNumber:

@ -400,8 +400,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
/*
* Adjacent strategy
*
* We test for overlap first but to be safe we need to call the
* actual adjacent operator also.
*
* An empty element cannot be adjacent to any other, so there is
* no need to check for it.

@ -426,8 +426,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
* the contains operator. Generally, inequality strategies do not
* make much sense for the types which will be used with the
* inclusion BRIN family of opclasses, but is is possible to
* implement them with logical negation of the left-of and
* right-of operators.
*
* NB: These strategies cannot be used with geometric datatypes
* that use comparison of areas! The only exception is the "same"

@ -33,7 +33,7 @@ Datum brin_minmax_add_value(PG_FUNCTION_ARGS);
Datum brin_minmax_consistent(PG_FUNCTION_ARGS);
Datum brin_minmax_union(PG_FUNCTION_ARGS);
static FmgrInfo *minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno,
Oid subtype, uint16 strategynum);

Datum

@ -209,7 +209,7 @@ brin_minmax_consistent(PG_FUNCTION_ARGS)
break;
/* max() >= scankey */
finfo = minmax_get_strategy_procinfo(bdesc, attno, subtype,
BTGreaterEqualStrategyNumber);
matches = FunctionCall2Coll(finfo, colloid, column->bv_values[1],
value);
break;

@ -260,10 +260,10 @@ brin_minmax_union(PG_FUNCTION_ARGS)
attr = bdesc->bd_tupdesc->attrs[attno - 1];

/*
* Adjust "allnulls". If A doesn't have values, just copy the values from
* B into A, and we're done. We cannot run the operators in this case,
* because values in A might contain garbage. Note we already established
* that B contains values.
*/
if (col_a->bv_allnulls)
{

@ -355,7 +355,7 @@ minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno, Oid subtype,
strategynum, attr->atttypid, subtype, opfamily);

oprid = DatumGetObjectId(SysCacheGetAttr(AMOPSTRATEGY, tuple,
Anum_pg_amop_amopopr, &isNull));
ReleaseSysCache(tuple);
Assert(!isNull && RegProcedureIsValid(oprid));

@ -48,7 +48,7 @@ struct BrinRevmap
{
Relation rm_irel;
BlockNumber rm_pagesPerRange;
BlockNumber rm_lastRevmapPage; /* cached from the metapage */
Buffer rm_metaBuf;
Buffer rm_currBuf;
};

@ -57,7 +57,7 @@ struct BrinRevmap

static BlockNumber revmap_get_blkno(BrinRevmap *revmap,
BlockNumber heapBlk);
static Buffer revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk);
static BlockNumber revmap_extend_and_get_blkno(BrinRevmap *revmap,
BlockNumber heapBlk);

@ -110,7 +110,7 @@ brinRevmapTerminate(BrinRevmap *revmap)
void
brinRevmapExtend(BrinRevmap *revmap, BlockNumber heapBlk)
{
BlockNumber mapBlk PG_USED_FOR_ASSERTS_ONLY;

mapBlk = revmap_extend_and_get_blkno(revmap, heapBlk);

@ -245,7 +245,7 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk,
if (ItemPointerIsValid(&previptr) && ItemPointerEquals(&previptr, iptr))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg_internal("corrupted BRIN index: inconsistent range map")));
previptr = *iptr;

blk = ItemPointerGetBlockNumber(iptr);

@ -356,7 +356,7 @@ revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk)
static BlockNumber
revmap_extend_and_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
{
BlockNumber targetblk;

/* obtain revmap block number, skip 1 for metapage block */
targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;

@ -445,10 +445,10 @@ revmap_physical_extend(BrinRevmap *revmap)
if (!PageIsNew(page) && !BRIN_IS_REGULAR_PAGE(page))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
BrinPageType(page),
RelationGetRelationName(irel),
BufferGetBlockNumber(buf))));

/* If the page is in use, evacuate it and restart */
if (brin_start_evacuating_page(irel, buf))

@ -68,7 +68,7 @@ brtuple_disk_tupdesc(BrinDesc *brdesc)
{
for (j = 0; j < brdesc->bd_info[i]->oi_nstored; j++)
TupleDescInitEntry(tupdesc, attno++, NULL,
brdesc->bd_info[i]->oi_typcache[j]->type_id,
-1, 0);
}

@ -1785,7 +1785,8 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* Set up the scan keys, and check for unsatisfiable query.
*/
ginFreeScanKeys(so); /* there should be no keys yet, but just to be
* sure */
ginNewScanKey(scan);

if (GinIsVoidRes(scan))

@ -527,7 +527,7 @@ ginoptions(PG_FUNCTION_ARGS)
static const relopt_parse_elt tab[] = {
{"fastupdate", RELOPT_TYPE_BOOL, offsetof(GinOptions, useFastUpdate)},
{"gin_pending_list_limit", RELOPT_TYPE_INT, offsetof(GinOptions,
pendingListCleanupSize)}
};

options = parseRelOptions(reloptions, validate, RELOPT_KIND_GIN,

@ -1407,7 +1407,7 @@ initGISTstate(Relation index)
/* opclasses are not required to provide a Fetch method */
if (OidIsValid(index_getprocid(index, i + 1, GIST_FETCH_PROC)))
fmgr_info_copy(&(giststate->fetchFn[i]),
index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
scanCxt);
else
giststate->fetchFn[i].fn_oid = InvalidOid;

@ -154,8 +154,8 @@ gistrescan(PG_FUNCTION_ARGS)
}

/*
* If we're doing an index-only scan, on the first call, also initialize a
* tuple descriptor to represent the returned index tuples and create a
* memory context to hold them during the scan.
*/
if (scan->xs_want_itup && !scan->xs_itupdesc)

@ -169,7 +169,7 @@ gistrescan(PG_FUNCTION_ARGS)
* descriptor. Instead, construct a descriptor with the original data
* types.
*/
natts = RelationGetNumberOfAttributes(scan->indexRelation);
so->giststate->fetchTupdesc = CreateTemplateTupleDesc(natts, false);
for (attno = 1; attno <= natts; attno++)
{

@ -288,9 +288,9 @@ gistrescan(PG_FUNCTION_ARGS)
fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);

/*
* Look up the datatype returned by the original ordering
* operator. GiST always uses a float8 for the distance function,
* but the ordering operator could be anything else.
*
* XXX: The distance function is only allowed to be lossy if the
* ordering operator's result type is float4 or float8. Otherwise

@ -583,7 +583,7 @@ gistFormTuple(GISTSTATE *giststate, Relation r,
isleaf);
cep = (GISTENTRY *)
DatumGetPointer(FunctionCall1Coll(&giststate->compressFn[i],
giststate->supportCollation[i],
PointerGetDatum(&centry)));
compatt[i] = cep->key;
}

@ -80,7 +80,7 @@ bool synchronize_seqscans = true;
static HeapScanDesc heap_beginscan_internal(Relation relation,
Snapshot snapshot,
int nkeys, ScanKey key,
bool allow_strat, bool allow_sync, bool allow_pagemode,
bool is_bitmapscan, bool is_samplescan,
bool temp_snap);
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,

@ -1366,8 +1366,8 @@ heap_beginscan_sampling(Relation relation, Snapshot snapshot,
static HeapScanDesc
heap_beginscan_internal(Relation relation, Snapshot snapshot,
int nkeys, ScanKey key,
bool allow_strat, bool allow_sync, bool allow_pagemode,
bool is_bitmapscan, bool is_samplescan, bool temp_snap)
{
HeapScanDesc scan;

@ -2284,9 +2284,9 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
{
/*
* For now, parallel operations are required to be strictly read-only.
* Unlike heap_update() and heap_delete(), an insert should never create a
* combo CID, so it might be possible to relax this restriction, but not
* without more thought and testing.
*/
if (IsInParallelMode())
ereport(ERROR,

@ -2768,8 +2768,8 @@ l1:
infomask = tp.t_data->t_infomask;

/*
* Sleep until concurrent transaction ends -- except when there's a
* single locker and it's our own transaction. Note we don't care
* which lock mode the locker has, because we need the strongest one.
*
* Before sleeping, we need to acquire tuple lock to establish our

@ -2822,8 +2822,8 @@ l1:
else if (!TransactionIdIsCurrentTransactionId(xwait))
{
/*
* Wait for regular transaction to end; but first, acquire tuple
* lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,

@ -3336,8 +3336,8 @@ l2:
*
* Before sleeping, we need to acquire tuple lock to establish our
* priority for the tuple (see heap_lock_tuple). LockTuple will
* release us when we are next-in-line for the tuple. Note we must
* not acquire the tuple lock until we're sure we're going to sleep;
* otherwise we're open for race conditions with other transactions
* holding the tuple lock which sleep on us.
*

@ -3374,8 +3374,8 @@ l2:
*/
if (xmax_infomask_changed(oldtup.t_data->t_infomask,
infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
xwait))
goto l2;
}

@ -3425,9 +3425,9 @@ l2:
else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
{
/*
* If it's just a key-share locker, and we're not changing the key
* columns, we don't need to wait for it to end; but we need to
* preserve it as locker.
*/
checked_lockers = true;
locker_remains = true;

@ -3436,8 +3436,8 @@ l2:
else
{
/*
* Wait for regular transaction to end; but first, acquire tuple
* lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,

@ -3454,7 +3454,7 @@ l2:
*/
if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
!TransactionIdEquals(xwait,
HeapTupleHeaderGetRawXmax(oldtup.t_data)))
goto l2;

/* Otherwise check if it committed or aborted */

@ -3779,7 +3779,7 @@ l2:
HeapTupleClearHeapOnly(newtup);
}

RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */

if (!already_marked)
{

@ -4477,7 +4477,7 @@ l3:
if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
TransactionIdIsCurrentTransactionId(xwait))
{
/* ... but if the xmax changed in the meantime, start over */
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),

@ -4501,8 +4501,8 @@ l3:
* for the tuple. We must do this even if we are share-locking.
*
* If we are forced to "start over" below, we keep the tuple lock;
* this arranges that we stay at the head of the line while
* rechecking tuple state.
*/
if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
&have_tuple_lock))

@ -4530,11 +4530,11 @@ l3:
{
case LockWaitBlock:
MultiXactIdWait((MultiXactId) xwait, status, infomask,
relation, &tuple->t_self, XLTW_Lock, NULL);
break;
case LockWaitSkip:
if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
status, infomask, relation,
NULL))
{
result = HeapTupleWouldBlock;

@ -4545,12 +4545,12 @@ l3:
break;
case LockWaitError:
if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
status, infomask, relation,
NULL))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
RelationGetRelationName(relation))));

break;
}

@ -4588,7 +4588,7 @@ l3:
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
RelationGetRelationName(relation))));
break;
}
}

@ -4613,9 +4613,9 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);

/*
* xwait is done, but if xwait had just locked the tuple then some
* other xact could update this tuple before we get to this point.
* Check for xmax change, and start over if so.
*/
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),

@ -4628,9 +4628,9 @@ l3:
* Otherwise check if it committed or aborted. Note we cannot
* be here if the tuple was only locked by somebody who didn't
* conflict with us; that would have been handled above. So
* that transaction must necessarily be gone by now. But
* don't check for this in the multixact case, because some
* locker transactions might still be running.
*/
UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
}

@ -4810,8 +4810,8 @@ heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
if (!ConditionalLockTupleTuplock(relation, tid, mode))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
RelationGetRelationName(relation))));
break;
}
*have_tuple_lock = true;

@ -5513,8 +5513,8 @@ heap_finish_speculative(Relation relation, HeapTuple tuple)
MarkBufferDirty(buffer);

/*
* Replace the speculative insertion token with a real t_ctid, pointing to
* itself like it does on regular tuples.
*/
htup->t_ctid = tuple->t_self;

@ -6447,23 +6447,23 @@ static bool
DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
LockTupleMode lockmode)
{
bool allow_old;
int nmembers;
MultiXactMember *members;
bool result = false;
LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;

allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
HEAP_XMAX_IS_LOCKED_ONLY(infomask));
if (nmembers >= 0)
{
int i;

for (i = 0; i < nmembers; i++)
{
TransactionId memxid;
LOCKMODE memlockmode;

memlockmode = LOCKMODE_from_mxstatus(members[i].status);

@ -7093,7 +7093,7 @@ log_heap_update(Relation reln, Buffer oldbuf,
{
XLogRegisterBufData(0,
((char *) newtup->t_data) + SizeofHeapTupleHeader,
newtup->t_len - SizeofHeapTupleHeader - suffixlen);
}
else
{

@ -7105,8 +7105,8 @@ log_heap_update(Relation reln, Buffer oldbuf,
if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
{
XLogRegisterBufData(0,
((char *) newtup->t_data) + SizeofHeapTupleHeader,
newtup->t_data->t_hoff - SizeofHeapTupleHeader);
}

/* data after common prefix */

@ -7289,8 +7289,8 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *
{
/*
* The OID column can appear in an index definition, but that's
* OK, because we always copy the OID if present (see below).
* Other system columns may not.
*/
if (attno == ObjectIdAttributeNumber)
continue;

|
|||
ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
|
||||
|
||||
/*
|
||||
* Insert the correct position into CTID of the stored tuple, too
|
||||
* (unless this is a speculative insertion, in which case the token is
|
||||
* held in CTID field instead)
|
||||
* Insert the correct position into CTID of the stored tuple, too (unless
|
||||
* this is a speculative insertion, in which case the token is held in
|
||||
* CTID field instead)
|
||||
*/
|
||||
if (!token)
|
||||
{
|
||||
|
|
|
@ -185,11 +185,11 @@ BuildIndexValueDescription(Relation indexRelation,
* Check permissions- if the user does not have access to view all of the
* key columns then return NULL to avoid leaking data.
*
* First check if RLS is enabled for the relation. If so, return NULL to
* avoid leaking data.
*
* Next we need to check table-level SELECT access and then, if there is
* no access there, check column-level permissions.
*/

/*

@ -215,18 +215,18 @@ BuildIndexValueDescription(Relation indexRelation,
if (aclresult != ACLCHECK_OK)
{
/*
* No table-level access, so step through the columns in the index and
* make sure the user has SELECT rights on all of them.
*/
for (keyno = 0; keyno < idxrec->indnatts; keyno++)
{
AttrNumber attnum = idxrec->indkey.values[keyno];

/*
* Note that if attnum == InvalidAttrNumber, then this is an index
* based on an expression and we return no detail rather than try
* to figure out what column(s) the expression includes and if the
* user has SELECT rights on them.
*/
if (attnum == InvalidAttrNumber ||
pg_attribute_aclcheck(indrelid, attnum, GetUserId(),

@ -160,8 +160,8 @@ top:
*/
if (checkUnique != UNIQUE_CHECK_NO)
{
TransactionId xwait;
uint32 speculativeToken;

offset = _bt_binsrch(rel, buf, natts, itup_scankey, false);
xwait = _bt_check_unique(rel, itup, heapRel, buf, offset, itup_scankey,

@ -171,9 +171,10 @@ top:
{
/* Have to wait for the other guy ... */
_bt_relbuf(rel, buf);

/*
* If it's a speculative insertion, wait for it to finish (ie. to
* go ahead with the insertion, or kill the tuple). Otherwise
* wait for the transaction to finish as usual.
*/
if (speculativeToken)

@ -417,8 +418,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
(errcode(ERRCODE_UNIQUE_VIOLATION),
errmsg("duplicate key value violates unique constraint \"%s\"",
RelationGetRelationName(rel)),
key_desc ? errdetail("Key %s already exists.",
key_desc) : 0,
errtableconstraint(heapRel,
RelationGetRelationName(rel))));
}

@ -1233,6 +1233,7 @@ _bt_pagedel(Relation rel, Buffer buf)
lbuf = _bt_getbuf(rel, leftsib, BT_READ);
lpage = BufferGetPage(lbuf);
lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);

/*
* If the left sibling is split again by another backend,
* after we released the lock, we know that the first

@ -1345,11 +1346,11 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
leafrightsib = opaque->btpo_next;

/*
* Before attempting to lock the parent page, check that the right sibling
* is not in half-dead state. A half-dead right sibling would have no
* downlink in the parent, which would be highly confusing later when we
* delete the downlink that follows the current page's downlink. (I
* believe the deletion would work correctly, but it would fail the
* cross-check we make that the following downlink points to the right
* sibling of the delete page.)
*/

@ -40,9 +40,8 @@ typedef struct
BTSpool *spool;

/*
* spool2 is needed only when the index is a unique index. Dead tuples are
* put into spool2 instead of spool in order to avoid uniqueness check.
*/
BTSpool *spool2;
double indtuples;

@ -1027,10 +1027,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
offnum = OffsetNumberPrev(offnum);

/*
* By here the scan position is now set for the first key. If all further
* tuples are expected to match we set the SK_BT_MATCHED flag to avoid
* re-checking the scan key later. This is a big win for slow key matches
* though is still significant even for fast datatypes.
*/
switch (startKeys[0]->sk_strategy)
{

@ -742,7 +742,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
{
for (i = 1; i <= keysz; i++)
{
SortSupport entry;
Datum attrDatum1,
attrDatum2;
bool isNull1,

@ -1430,8 +1430,8 @@ _bt_checkkeys(IndexScanDesc scan,
Datum test;

/*
* If the scan key has already matched we can skip this key, as long
* as the index tuple does not contain NULL values.
*/
if (key->sk_flags & SK_BT_MATCHED && !IndexTupleHasNulls(tuple))
continue;

@ -1740,7 +1740,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* any items from the page, and so there is no need to search left from the
* recorded offset. (This observation also guarantees that the item is still
* the right one to delete, which might otherwise be questionable since heap
* TIDs can get recycled.) This holds true even if the page has been modified
* by inserts and page splits, so there is no need to consult the LSN.
*
* If the pin was released after reading the page, then we re-read it. If it

@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* committsdesc.c
* rmgr descriptor routines for access/transam/commit_ts.c
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/access/rmgrdesc/committsdesc.c
*
*-------------------------------------------------------------------------
*/

@ -41,7 +41,7 @@ commit_ts_desc(StringInfo buf, XLogReaderState *record)
else if (info == COMMIT_TS_SETTS)
{
xl_commit_ts_set *xlrec = (xl_commit_ts_set *) rec;
int nsubxids;

appendStringInfo(buf, "set %s/%d for: %u",
timestamptz_to_str(xlrec->timestamp),

@ -51,7 +51,7 @@ commit_ts_desc(StringInfo buf, XLogReaderState *record)
sizeof(TransactionId));
if (nsubxids > 0)
{
int i;
TransactionId *subxids;

subxids = palloc(sizeof(TransactionId) * nsubxids);

@ -1,13 +1,13 @@
/*-------------------------------------------------------------------------
*
* replorigindesc.c
* rmgr descriptor routines for replication/logical/replication_origin.c
*
* Portions Copyright (c) 2015, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
* src/backend/access/rmgrdesc/replorigindesc.c
*
*-------------------------------------------------------------------------
*/

@ -26,6 +26,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
case XLOG_REPLORIGIN_SET:
{
xl_replorigin_set *xlrec;

xlrec = (xl_replorigin_set *) rec;

appendStringInfo(buf, "set %u; lsn %X/%X; force: %d",

@ -38,6 +39,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
case XLOG_REPLORIGIN_DROP:
{
xl_replorigin_drop *xlrec;

xlrec = (xl_replorigin_drop *) rec;

appendStringInfo(buf, "drop %u", xlrec->node_id);

@ -37,7 +37,8 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
|
|||
|
||||
memset(parsed, 0, sizeof(*parsed));
|
||||
|
||||
parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
|
||||
parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is
|
||||
* present */
|
||||
|
||||
parsed->xact_time = xlrec->xact_time;
|
||||
|
||||
|
@ -62,7 +63,7 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
|
|||
|
||||
if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
|
||||
{
|
||||
xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
|
||||
xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
|
||||
|
||||
parsed->nsubxacts = xl_subxacts->nsubxacts;
|
||||
parsed->subxacts = xl_subxacts->subxacts;
|
||||
|
@@ -123,7 +124,8 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)

    memset(parsed, 0, sizeof(*parsed));

    parsed->xinfo = 0;          /* default, if no XLOG_XACT_HAS_INFO is present */
    parsed->xinfo = 0;          /* default, if no XLOG_XACT_HAS_INFO is
                                 * present */

    parsed->xact_time = xlrec->xact_time;

@@ -138,7 +140,7 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)

    if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
    {
        xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
        xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;

        parsed->nsubxacts = xl_subxacts->nsubxacts;
        parsed->subxacts = xl_subxacts->subxacts;
@@ -236,8 +238,8 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
    {
        appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
                         origin_id,
                         (uint32)(parsed.origin_lsn >> 32),
                         (uint32)parsed.origin_lsn,
                         (uint32) (parsed.origin_lsn >> 32),
                         (uint32) parsed.origin_lsn,
                         timestamptz_to_str(parsed.origin_timestamp));
    }
}
@@ -658,6 +658,7 @@ Datum
spgcanreturn(PG_FUNCTION_ARGS)
{
    Relation    index = (Relation) PG_GETARG_POINTER(0);

    /* int i = PG_GETARG_INT32(1); */
    SpGistCache *cache;

@@ -27,13 +27,15 @@
/* tsdesc */
typedef struct
{
    uint32      seed;           /* random seed */
    BlockNumber startblock;     /* starting block, we use ths for syncscan support */
    uint32      seed;           /* random seed */
    BlockNumber startblock;     /* starting block, we use ths for syncscan
                                 * support */
    BlockNumber nblocks;        /* number of blocks */
    BlockNumber blockno;        /* current block */
    float4      probability;    /* probabilty that tuple will be returned (0.0-1.0) */
    float4      probability;    /* probabilty that tuple will be returned
                                 * (0.0-1.0) */
    OffsetNumber lt;            /* last tuple returned from current block */
    SamplerRandomState randstate;   /* random generator tsdesc */
    SamplerRandomState randstate;   /* random generator tsdesc */
} BernoulliSamplerData;

/*
@ -42,10 +44,10 @@ typedef struct
|
|||
Datum
|
||||
tsm_bernoulli_init(PG_FUNCTION_ARGS)
|
||||
{
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
uint32 seed = PG_GETARG_UINT32(1);
|
||||
float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
|
||||
HeapScanDesc scan = tsdesc->heapScan;
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
uint32 seed = PG_GETARG_UINT32(1);
|
||||
float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
|
||||
HeapScanDesc scan = tsdesc->heapScan;
|
||||
BernoulliSamplerData *sampler;
|
||||
|
||||
if (percent < 0 || percent > 100)
|
||||
|
@ -77,14 +79,13 @@ tsm_bernoulli_init(PG_FUNCTION_ARGS)
|
|||
Datum
|
||||
tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
|
||||
{
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
BernoulliSamplerData *sampler =
|
||||
(BernoulliSamplerData *) tsdesc->tsmdata;
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
BernoulliSamplerData *sampler =
|
||||
(BernoulliSamplerData *) tsdesc->tsmdata;
|
||||
|
||||
/*
|
||||
* Bernoulli sampling scans all blocks on the table and supports
|
||||
* syncscan so loop from startblock to startblock instead of
|
||||
* from 0 to nblocks.
|
||||
* Bernoulli sampling scans all blocks on the table and supports syncscan
|
||||
* so loop from startblock to startblock instead of from 0 to nblocks.
|
||||
*/
|
||||
if (sampler->blockno == InvalidBlockNumber)
|
||||
sampler->blockno = sampler->startblock;
|
||||
|
@ -116,7 +117,7 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
|
|||
* tuples have same probability of being returned the visible and invisible
|
||||
* tuples will be returned in same ratio as they have in the actual table.
|
||||
* This means that there is no skew towards either visible or invisible tuples
|
||||
* and the number returned visible tuples to from the executor node is the
|
||||
* and the number returned visible tuples to from the executor node is the
|
||||
* fraction of visible tuples which was specified in input.
|
||||
*
|
||||
* This is faster than doing the coinflip in the examinetuple because we don't
|
||||
|
@ -128,12 +129,12 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
|
|||
Datum
|
||||
tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
|
||||
{
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
|
||||
BernoulliSamplerData *sampler =
|
||||
(BernoulliSamplerData *) tsdesc->tsmdata;
|
||||
OffsetNumber tupoffset = sampler->lt;
|
||||
float4 probability = sampler->probability;
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
|
||||
BernoulliSamplerData *sampler =
|
||||
(BernoulliSamplerData *) tsdesc->tsmdata;
|
||||
OffsetNumber tupoffset = sampler->lt;
|
||||
float4 probability = sampler->probability;
|
||||
|
||||
if (tupoffset == InvalidOffsetNumber)
|
||||
tupoffset = FirstOffsetNumber;
|
||||
|
@@ -142,8 +143,8 @@ tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)

    /*
     * Loop over tuple offsets until the random generator returns value that
     * is within the probability of returning the tuple or until we reach
     * end of the block.
     * is within the probability of returning the tuple or until we reach end
     * of the block.
     *
     * (This is our implementation of bernoulli trial)
     */
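The comment rewrapped above describes the per-tuple Bernoulli trial without showing the loop itself. As a point of reference only, a minimal standalone sketch of that idea in plain C, with drand48() standing in for the sampler's own random state (an assumption made purely so the example is self-contained):

    #include <stdlib.h>

    /*
     * Return the next 1-based offset that passes the Bernoulli trial, or 0 if
     * we fall off the end of the block.  'probability' is in 0.0 .. 1.0.
     */
    static unsigned
    next_sampled_offset(unsigned tupoffset, unsigned maxoffset, double probability)
    {
        while (tupoffset <= maxoffset)
        {
            if (drand48() < probability)    /* independent coin flip per tuple */
                return tupoffset;
            tupoffset++;
        }
        return 0;                           /* block exhausted */
    }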
@ -183,9 +184,9 @@ tsm_bernoulli_end(PG_FUNCTION_ARGS)
|
|||
Datum
|
||||
tsm_bernoulli_reset(PG_FUNCTION_ARGS)
|
||||
{
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
BernoulliSamplerData *sampler =
|
||||
(BernoulliSamplerData *) tsdesc->tsmdata;
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
BernoulliSamplerData *sampler =
|
||||
(BernoulliSamplerData *) tsdesc->tsmdata;
|
||||
|
||||
sampler->blockno = InvalidBlockNumber;
|
||||
sampler->lt = InvalidOffsetNumber;
|
||||
|
@ -200,14 +201,14 @@ tsm_bernoulli_reset(PG_FUNCTION_ARGS)
|
|||
Datum
|
||||
tsm_bernoulli_cost(PG_FUNCTION_ARGS)
|
||||
{
|
||||
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
|
||||
Path *path = (Path *) PG_GETARG_POINTER(1);
|
||||
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
|
||||
List *args = (List *) PG_GETARG_POINTER(3);
|
||||
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
|
||||
double *tuples = (double *) PG_GETARG_POINTER(5);
|
||||
Node *pctnode;
|
||||
float4 samplesize;
|
||||
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
|
||||
Path *path = (Path *) PG_GETARG_POINTER(1);
|
||||
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
|
||||
List *args = (List *) PG_GETARG_POINTER(3);
|
||||
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
|
||||
double *tuples = (double *) PG_GETARG_POINTER(5);
|
||||
Node *pctnode;
|
||||
float4 samplesize;
|
||||
|
||||
*pages = baserel->pages;
|
||||
|
||||
|
|
|
@ -31,9 +31,9 @@
|
|||
typedef struct
|
||||
{
|
||||
BlockSamplerData bs;
|
||||
uint32 seed; /* random seed */
|
||||
uint32 seed; /* random seed */
|
||||
BlockNumber nblocks; /* number of block in relation */
|
||||
int samplesize; /* number of blocks to return */
|
||||
int samplesize; /* number of blocks to return */
|
||||
OffsetNumber lt; /* last tuple returned from current block */
|
||||
} SystemSamplerData;
|
||||
|
||||
|
@ -44,11 +44,11 @@ typedef struct
|
|||
Datum
|
||||
tsm_system_init(PG_FUNCTION_ARGS)
|
||||
{
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
uint32 seed = PG_GETARG_UINT32(1);
|
||||
float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
|
||||
HeapScanDesc scan = tsdesc->heapScan;
|
||||
SystemSamplerData *sampler;
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
uint32 seed = PG_GETARG_UINT32(1);
|
||||
float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
|
||||
HeapScanDesc scan = tsdesc->heapScan;
|
||||
SystemSamplerData *sampler;
|
||||
|
||||
if (percent < 0 || percent > 100)
|
||||
ereport(ERROR,
|
||||
|
@ -80,9 +80,9 @@ tsm_system_init(PG_FUNCTION_ARGS)
|
|||
Datum
|
||||
tsm_system_nextblock(PG_FUNCTION_ARGS)
|
||||
{
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
|
||||
BlockNumber blockno;
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
|
||||
BlockNumber blockno;
|
||||
|
||||
if (!BlockSampler_HasMore(&sampler->bs))
|
||||
PG_RETURN_UINT32(InvalidBlockNumber);
|
||||
|
@ -99,10 +99,10 @@ tsm_system_nextblock(PG_FUNCTION_ARGS)
|
|||
Datum
|
||||
tsm_system_nexttuple(PG_FUNCTION_ARGS)
|
||||
{
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
|
||||
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
|
||||
OffsetNumber tupoffset = sampler->lt;
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
|
||||
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
|
||||
OffsetNumber tupoffset = sampler->lt;
|
||||
|
||||
if (tupoffset == InvalidOffsetNumber)
|
||||
tupoffset = FirstOffsetNumber;
|
||||
|
@ -136,8 +136,8 @@ tsm_system_end(PG_FUNCTION_ARGS)
|
|||
Datum
|
||||
tsm_system_reset(PG_FUNCTION_ARGS)
|
||||
{
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
|
||||
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
|
||||
|
||||
sampler->lt = InvalidOffsetNumber;
|
||||
BlockSampler_Init(&sampler->bs, sampler->nblocks, sampler->samplesize,
|
||||
|
@ -152,14 +152,14 @@ tsm_system_reset(PG_FUNCTION_ARGS)
|
|||
Datum
|
||||
tsm_system_cost(PG_FUNCTION_ARGS)
|
||||
{
|
||||
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
|
||||
Path *path = (Path *) PG_GETARG_POINTER(1);
|
||||
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
|
||||
List *args = (List *) PG_GETARG_POINTER(3);
|
||||
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
|
||||
double *tuples = (double *) PG_GETARG_POINTER(5);
|
||||
Node *pctnode;
|
||||
float4 samplesize;
|
||||
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
|
||||
Path *path = (Path *) PG_GETARG_POINTER(1);
|
||||
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
|
||||
List *args = (List *) PG_GETARG_POINTER(3);
|
||||
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
|
||||
double *tuples = (double *) PG_GETARG_POINTER(5);
|
||||
Node *pctnode;
|
||||
float4 samplesize;
|
||||
|
||||
pctnode = linitial(args);
|
||||
pctnode = estimate_expression_value(root, pctnode);
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* tablesample.c
|
||||
* TABLESAMPLE internal API
|
||||
* TABLESAMPLE internal API
|
||||
*
|
||||
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* src/backend/access/tablesample/tablesample.c
|
||||
* src/backend/access/tablesample/tablesample.c
|
||||
*
|
||||
* TABLESAMPLE is the SQL standard clause for sampling the relations.
|
||||
*
|
||||
|
@ -53,7 +53,7 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
|
|||
List *args = tablesample->args;
|
||||
ListCell *arg;
|
||||
ExprContext *econtext = scanstate->ss.ps.ps_ExprContext;
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
|
||||
TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
|
||||
|
||||
/* Load functions */
|
||||
fmgr_info(tablesample->tsminit, &(tsdesc->tsminit));
|
||||
|
@ -78,21 +78,21 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
|
|||
fcinfo.argnull[0] = false;
|
||||
|
||||
/*
|
||||
* Second arg for init function is always REPEATABLE
|
||||
* When tablesample->repeatable is NULL then REPEATABLE clause was not
|
||||
* specified.
|
||||
* When specified, the expression cannot evaluate to NULL.
|
||||
* Second arg for init function is always REPEATABLE When
|
||||
* tablesample->repeatable is NULL then REPEATABLE clause was not
|
||||
* specified. When specified, the expression cannot evaluate to NULL.
|
||||
*/
|
||||
if (tablesample->repeatable)
|
||||
{
|
||||
ExprState *argstate = ExecInitExpr((Expr *) tablesample->repeatable,
|
||||
(PlanState *) scanstate);
|
||||
|
||||
fcinfo.arg[1] = ExecEvalExpr(argstate, econtext,
|
||||
&fcinfo.argnull[1], NULL);
|
||||
if (fcinfo.argnull[1])
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
|
||||
errmsg("REPEATABLE clause must be NOT NULL numeric value")));
|
||||
errmsg("REPEATABLE clause must be NOT NULL numeric value")));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -130,15 +130,15 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
|
|||
HeapTuple
|
||||
tablesample_getnext(TableSampleDesc *desc)
|
||||
{
|
||||
HeapScanDesc scan = desc->heapScan;
|
||||
HeapTuple tuple = &(scan->rs_ctup);
|
||||
bool pagemode = scan->rs_pageatatime;
|
||||
BlockNumber blockno;
|
||||
Page page;
|
||||
bool page_all_visible;
|
||||
ItemId itemid;
|
||||
OffsetNumber tupoffset,
|
||||
maxoffset;
|
||||
HeapScanDesc scan = desc->heapScan;
|
||||
HeapTuple tuple = &(scan->rs_ctup);
|
||||
bool pagemode = scan->rs_pageatatime;
|
||||
BlockNumber blockno;
|
||||
Page page;
|
||||
bool page_all_visible;
|
||||
ItemId itemid;
|
||||
OffsetNumber tupoffset,
|
||||
maxoffset;
|
||||
|
||||
if (!scan->rs_inited)
|
||||
{
|
||||
|
@ -152,7 +152,7 @@ tablesample_getnext(TableSampleDesc *desc)
|
|||
return NULL;
|
||||
}
|
||||
blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
|
||||
PointerGetDatum(desc)));
|
||||
PointerGetDatum(desc)));
|
||||
if (!BlockNumberIsValid(blockno))
|
||||
{
|
||||
tuple->t_data = NULL;
|
||||
|
@ -184,14 +184,14 @@ tablesample_getnext(TableSampleDesc *desc)
|
|||
CHECK_FOR_INTERRUPTS();
|
||||
|
||||
tupoffset = DatumGetUInt16(FunctionCall3(&desc->tsmnexttuple,
|
||||
PointerGetDatum(desc),
|
||||
UInt32GetDatum(blockno),
|
||||
UInt16GetDatum(maxoffset)));
|
||||
PointerGetDatum(desc),
|
||||
UInt32GetDatum(blockno),
|
||||
UInt16GetDatum(maxoffset)));
|
||||
|
||||
if (OffsetNumberIsValid(tupoffset))
|
||||
{
|
||||
bool visible;
|
||||
bool found;
|
||||
bool visible;
|
||||
bool found;
|
||||
|
||||
/* Skip invalid tuple pointers. */
|
||||
itemid = PageGetItemId(page, tupoffset);
|
||||
|
@ -208,8 +208,8 @@ tablesample_getnext(TableSampleDesc *desc)
|
|||
visible = SampleTupleVisible(tuple, tupoffset, scan);
|
||||
|
||||
/*
|
||||
* Let the sampling method examine the actual tuple and decide if we
|
||||
* should return it.
|
||||
* Let the sampling method examine the actual tuple and decide if
|
||||
* we should return it.
|
||||
*
|
||||
* Note that we let it examine even invisible tuples for
|
||||
* statistical purposes, but not return them since user should
|
||||
|
@ -218,10 +218,10 @@ tablesample_getnext(TableSampleDesc *desc)
|
|||
if (OidIsValid(desc->tsmexaminetuple.fn_oid))
|
||||
{
|
||||
found = DatumGetBool(FunctionCall4(&desc->tsmexaminetuple,
|
||||
PointerGetDatum(desc),
|
||||
UInt32GetDatum(blockno),
|
||||
PointerGetDatum(tuple),
|
||||
BoolGetDatum(visible)));
|
||||
PointerGetDatum(desc),
|
||||
UInt32GetDatum(blockno),
|
||||
PointerGetDatum(tuple),
|
||||
BoolGetDatum(visible)));
|
||||
/* Should not happen if sampling method is well written. */
|
||||
if (found && !visible)
|
||||
elog(ERROR, "Sampling method wanted to return invisible tuple");
|
||||
|
@ -248,19 +248,19 @@ tablesample_getnext(TableSampleDesc *desc)
|
|||
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
|
||||
|
||||
blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
|
||||
PointerGetDatum(desc)));
|
||||
PointerGetDatum(desc)));
|
||||
|
||||
/*
|
||||
* Report our new scan position for synchronization purposes. We
|
||||
* don't do that when moving backwards, however. That would just
|
||||
* mess up any other forward-moving scanners.
|
||||
* Report our new scan position for synchronization purposes. We don't
|
||||
* do that when moving backwards, however. That would just mess up any
|
||||
* other forward-moving scanners.
|
||||
*
|
||||
* Note: we do this before checking for end of scan so that the
|
||||
* final state of the position hint is back at the start of the
|
||||
* rel. That's not strictly necessary, but otherwise when you run
|
||||
* the same query multiple times the starting position would shift
|
||||
* a little bit backwards on every invocation, which is confusing.
|
||||
* We don't guarantee any specific ordering in general, though.
|
||||
* Note: we do this before checking for end of scan so that the final
|
||||
* state of the position hint is back at the start of the rel. That's
|
||||
* not strictly necessary, but otherwise when you run the same query
|
||||
* multiple times the starting position would shift a little bit
|
||||
* backwards on every invocation, which is confusing. We don't
|
||||
* guarantee any specific ordering in general, though.
|
||||
*/
|
||||
if (scan->rs_syncscan)
|
||||
ss_report_location(scan->rs_rd, BlockNumberIsValid(blockno) ?
|
||||
|
@ -321,25 +321,25 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
|
|||
{
|
||||
/*
|
||||
* If this scan is reading whole pages at a time, there is already
|
||||
* visibility info present in rs_vistuples so we can just search it
|
||||
* for the tupoffset.
|
||||
* visibility info present in rs_vistuples so we can just search it for
|
||||
* the tupoffset.
|
||||
*/
|
||||
if (scan->rs_pageatatime)
|
||||
{
|
||||
int start = 0,
|
||||
end = scan->rs_ntuples - 1;
|
||||
int start = 0,
|
||||
end = scan->rs_ntuples - 1;
|
||||
|
||||
/*
|
||||
* Do the binary search over rs_vistuples, it's already sorted by
|
||||
* OffsetNumber so we don't need to do any sorting ourselves here.
|
||||
*
|
||||
* We could use bsearch() here but it's slower for integers because
|
||||
* of the function call overhead and because it needs boiler plate code
|
||||
* We could use bsearch() here but it's slower for integers because of
|
||||
* the function call overhead and because it needs boiler plate code
|
||||
* it would not save us anything code-wise anyway.
|
||||
*/
|
||||
while (start <= end)
|
||||
{
|
||||
int mid = start + (end - start) / 2;
|
||||
int mid = start + (end - start) / 2;
|
||||
OffsetNumber curoffset = scan->rs_vistuples[mid];
|
||||
|
||||
if (curoffset == tupoffset)
|
||||
|
@ -358,7 +358,7 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
|
|||
Snapshot snapshot = scan->rs_snapshot;
|
||||
Buffer buffer = scan->rs_cbuf;
|
||||
|
||||
bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
|
||||
bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
|
||||
|
||||
CheckForSerializableConflictOut(visible, scan->rs_rd, tuple, buffer,
|
||||
snapshot);
|
||||
|
|
|
@ -55,8 +55,8 @@
|
|||
*/
|
||||
typedef struct CommitTimestampEntry
|
||||
{
|
||||
TimestampTz time;
|
||||
RepOriginId nodeid;
|
||||
TimestampTz time;
|
||||
RepOriginId nodeid;
|
||||
} CommitTimestampEntry;
|
||||
|
||||
#define SizeOfCommitTimestampEntry (offsetof(CommitTimestampEntry, nodeid) + \
|
||||
|
@@ -65,7 +65,7 @@ typedef struct CommitTimestampEntry
#define COMMIT_TS_XACTS_PER_PAGE \
    (BLCKSZ / SizeOfCommitTimestampEntry)

#define TransactionIdToCTsPage(xid) \
#define TransactionIdToCTsPage(xid) \
    ((xid) / (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
#define TransactionIdToCTsEntry(xid) \
    ((xid) % (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
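To make the macros above concrete: assuming the default 8 kB block size and the entry layout shown earlier in this file (an 8-byte timestamp followed by a 2-byte origin id, so 10 bytes per entry before any padding), each SLRU page holds 819 entries, and an xid maps to page xid / 819, slot xid % 819. A tiny standalone rendering of that arithmetic, with the constants hard-coded as assumptions:

    #include <stdio.h>
    #include <stdint.h>

    #define MY_BLCKSZ           8192    /* assumed default block size */
    #define MY_ENTRY_SIZE       10      /* 8-byte timestamp + 2-byte origin id */
    #define MY_XACTS_PER_PAGE   (MY_BLCKSZ / MY_ENTRY_SIZE)     /* 819 */

    int
    main(void)
    {
        uint32_t    xid = 1000000;

        printf("xid %u -> page %u, entry %u\n",
               xid, xid / MY_XACTS_PER_PAGE, xid % MY_XACTS_PER_PAGE);
        return 0;
    }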
@ -83,21 +83,21 @@ static SlruCtlData CommitTsCtlData;
|
|||
*/
|
||||
typedef struct CommitTimestampShared
|
||||
{
|
||||
TransactionId xidLastCommit;
|
||||
TransactionId xidLastCommit;
|
||||
CommitTimestampEntry dataLastCommit;
|
||||
} CommitTimestampShared;
|
||||
|
||||
CommitTimestampShared *commitTsShared;
|
||||
CommitTimestampShared *commitTsShared;
|
||||
|
||||
|
||||
/* GUC variable */
|
||||
bool track_commit_timestamp;
|
||||
bool track_commit_timestamp;
|
||||
|
||||
static void SetXidCommitTsInPage(TransactionId xid, int nsubxids,
|
||||
TransactionId *subxids, TimestampTz ts,
|
||||
RepOriginId nodeid, int pageno);
|
||||
static void TransactionIdSetCommitTs(TransactionId xid, TimestampTz ts,
|
||||
RepOriginId nodeid, int slotno);
|
||||
RepOriginId nodeid, int slotno);
|
||||
static int ZeroCommitTsPage(int pageno, bool writeXlog);
|
||||
static bool CommitTsPagePrecedes(int page1, int page2);
|
||||
static void WriteZeroPageXlogRec(int pageno);
|
||||
|
@ -141,8 +141,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
|
|||
return;
|
||||
|
||||
/*
|
||||
* Comply with the WAL-before-data rule: if caller specified it wants
|
||||
* this value to be recorded in WAL, do so before touching the data.
|
||||
* Comply with the WAL-before-data rule: if caller specified it wants this
|
||||
* value to be recorded in WAL, do so before touching the data.
|
||||
*/
|
||||
if (do_xlog)
|
||||
WriteSetTimestampXlogRec(xid, nsubxids, subxids, timestamp, nodeid);
|
||||
|
@@ -159,9 +159,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
    /*
     * We split the xids to set the timestamp to in groups belonging to the
     * same SLRU page; the first element in each such set is its head. The
     * first group has the main XID as the head; subsequent sets use the
     * first subxid not on the previous page as head. This way, we only have
     * to lock/modify each SLRU page once.
     * first group has the main XID as the head; subsequent sets use the first
     * subxid not on the previous page as head. This way, we only have to
     * lock/modify each SLRU page once.
     */
    for (i = 0, headxid = xid;;)
    {
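The loop whose head appears above batches the xid and its sorted subxids by SLRU page so each page is locked and modified only once. A hedged, generic sketch of that batching pattern, using a hypothetical page_of() in place of TransactionIdToCTsPage and a printf in place of the per-page update:

    #include <stdio.h>
    #include <stdint.h>

    #define MY_XACTS_PER_PAGE 819       /* assumed, see the example above */

    static uint32_t
    page_of(uint32_t xid)
    {
        return xid / MY_XACTS_PER_PAGE;
    }

    static void
    set_in_page_groups(const uint32_t *xids, int n)
    {
        int         i = 0;

        while (i < n)
        {
            uint32_t    page = page_of(xids[i]);
            int         j = i;

            /* grow the group while the xids stay on the same page */
            while (j < n && page_of(xids[j]) == page)
                j++;
            printf("page %u: %d xids\n", page, j - i);  /* one page update */
            i = j;
        }
    }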
@ -183,8 +183,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
|
|||
break;
|
||||
|
||||
/*
|
||||
* Set the new head and skip over it, as well as over the subxids
|
||||
* we just wrote.
|
||||
* Set the new head and skip over it, as well as over the subxids we
|
||||
* just wrote.
|
||||
*/
|
||||
headxid = subxids[j];
|
||||
i += j - i + 1;
|
||||
|
@ -271,14 +271,14 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts,
|
|||
ereport(ERROR,
|
||||
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
|
||||
errmsg("could not get commit timestamp data"),
|
||||
errhint("Make sure the configuration parameter \"%s\" is set.",
|
||||
"track_commit_timestamp")));
|
||||
errhint("Make sure the configuration parameter \"%s\" is set.",
|
||||
"track_commit_timestamp")));
|
||||
|
||||
/* error if the given Xid doesn't normally commit */
|
||||
if (!TransactionIdIsNormal(xid))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
||||
errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
|
||||
errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
|
||||
|
||||
/*
|
||||
* Return empty if the requested value is outside our valid range.
|
||||
|
@ -350,15 +350,15 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts,
|
|||
TransactionId
|
||||
GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
|
||||
{
|
||||
TransactionId xid;
|
||||
TransactionId xid;
|
||||
|
||||
/* Error if module not enabled */
|
||||
if (!track_commit_timestamp)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
|
||||
errmsg("could not get commit timestamp data"),
|
||||
errhint("Make sure the configuration parameter \"%s\" is set.",
|
||||
"track_commit_timestamp")));
|
||||
errhint("Make sure the configuration parameter \"%s\" is set.",
|
||||
"track_commit_timestamp")));
|
||||
|
||||
LWLockAcquire(CommitTsLock, LW_SHARED);
|
||||
xid = commitTsShared->xidLastCommit;
|
||||
|
@ -377,9 +377,9 @@ GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
|
|||
Datum
|
||||
pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
|
||||
{
|
||||
TransactionId xid = PG_GETARG_UINT32(0);
|
||||
TimestampTz ts;
|
||||
bool found;
|
||||
TransactionId xid = PG_GETARG_UINT32(0);
|
||||
TimestampTz ts;
|
||||
bool found;
|
||||
|
||||
found = TransactionIdGetCommitTsData(xid, &ts, NULL);
|
||||
|
||||
|
@ -393,11 +393,11 @@ pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
|
|||
Datum
|
||||
pg_last_committed_xact(PG_FUNCTION_ARGS)
|
||||
{
|
||||
TransactionId xid;
|
||||
TimestampTz ts;
|
||||
Datum values[2];
|
||||
bool nulls[2];
|
||||
TupleDesc tupdesc;
|
||||
TransactionId xid;
|
||||
TimestampTz ts;
|
||||
Datum values[2];
|
||||
bool nulls[2];
|
||||
TupleDesc tupdesc;
|
||||
HeapTuple htup;
|
||||
|
||||
/* and construct a tuple with our data */
|
||||
|
@ -462,7 +462,7 @@ CommitTsShmemSize(void)
|
|||
void
|
||||
CommitTsShmemInit(void)
|
||||
{
|
||||
bool found;
|
||||
bool found;
|
||||
|
||||
CommitTsCtl->PagePrecedes = CommitTsPagePrecedes;
|
||||
SimpleLruInit(CommitTsCtl, "CommitTs Ctl", CommitTsShmemBuffers(), 0,
|
||||
|
@ -495,8 +495,8 @@ BootStrapCommitTs(void)
|
|||
{
|
||||
/*
|
||||
* Nothing to do here at present, unlike most other SLRU modules; segments
|
||||
* are created when the server is started with this module enabled.
|
||||
* See StartupCommitTs.
|
||||
* are created when the server is started with this module enabled. See
|
||||
* StartupCommitTs.
|
||||
*/
|
||||
}
|
||||
|
||||
|
@ -561,9 +561,9 @@ CompleteCommitTsInitialization(void)
|
|||
|
||||
/*
|
||||
* Activate this module whenever necessary.
|
||||
* This must happen during postmaster or standalong-backend startup,
|
||||
* or during WAL replay anytime the track_commit_timestamp setting is
|
||||
* changed in the master.
|
||||
* This must happen during postmaster or standalong-backend startup,
|
||||
* or during WAL replay anytime the track_commit_timestamp setting is
|
||||
* changed in the master.
|
||||
*
|
||||
* The reason why this SLRU needs separate activation/deactivation functions is
|
||||
* that it can be enabled/disabled during start and the activation/deactivation
|
||||
|
@ -612,7 +612,7 @@ ActivateCommitTs(void)
|
|||
/* Finally, create the current segment file, if necessary */
|
||||
if (!SimpleLruDoesPhysicalPageExist(CommitTsCtl, pageno))
|
||||
{
|
||||
int slotno;
|
||||
int slotno;
|
||||
|
||||
LWLockAcquire(CommitTsControlLock, LW_EXCLUSIVE);
|
||||
slotno = ZeroCommitTsPage(pageno, false);
|
||||
|
@ -834,7 +834,7 @@ WriteSetTimestampXlogRec(TransactionId mainxid, int nsubxids,
|
|||
TransactionId *subxids, TimestampTz timestamp,
|
||||
RepOriginId nodeid)
|
||||
{
|
||||
xl_commit_ts_set record;
|
||||
xl_commit_ts_set record;
|
||||
|
||||
record.timestamp = timestamp;
|
||||
record.nodeid = nodeid;
|
||||
|
@ -907,7 +907,7 @@ commit_ts_redo(XLogReaderState *record)
|
|||
subxids = NULL;
|
||||
|
||||
TransactionTreeSetCommitTsData(setts->mainxid, nsubxids, subxids,
|
||||
setts->timestamp, setts->nodeid, false);
|
||||
setts->timestamp, setts->nodeid, false);
|
||||
if (subxids)
|
||||
pfree(subxids);
|
||||
}
|
||||
|
|
|
@ -965,7 +965,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
|
|||
*/
|
||||
if (!MultiXactIdPrecedes(result, MultiXactState->multiVacLimit) ||
|
||||
(MultiXactState->nextOffset - MultiXactState->oldestOffset
|
||||
> MULTIXACT_MEMBER_SAFE_THRESHOLD))
|
||||
> MULTIXACT_MEMBER_SAFE_THRESHOLD))
|
||||
{
|
||||
/*
|
||||
* For safety's sake, we release MultiXactGenLock while sending
|
||||
|
@ -1190,9 +1190,9 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
|
|||
MultiXactIdSetOldestVisible();
|
||||
|
||||
/*
|
||||
* If we know the multi is used only for locking and not for updates,
|
||||
* then we can skip checking if the value is older than our oldest
|
||||
* visible multi. It cannot possibly still be running.
|
||||
* If we know the multi is used only for locking and not for updates, then
|
||||
* we can skip checking if the value is older than our oldest visible
|
||||
* multi. It cannot possibly still be running.
|
||||
*/
|
||||
if (onlyLock &&
|
||||
MultiXactIdPrecedes(multi, OldestVisibleMXactId[MyBackendId]))
|
||||
|
@ -1207,14 +1207,14 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
|
|||
*
|
||||
* An ID older than MultiXactState->oldestMultiXactId cannot possibly be
|
||||
* useful; it has already been removed, or will be removed shortly, by
|
||||
* truncation. Returning the wrong values could lead
|
||||
* to an incorrect visibility result. However, to support pg_upgrade we
|
||||
* need to allow an empty set to be returned regardless, if the caller is
|
||||
* willing to accept it; the caller is expected to check that it's an
|
||||
* allowed condition (such as ensuring that the infomask bits set on the
|
||||
* tuple are consistent with the pg_upgrade scenario). If the caller is
|
||||
* expecting this to be called only on recently created multis, then we
|
||||
* raise an error.
|
||||
* truncation. Returning the wrong values could lead to an incorrect
|
||||
* visibility result. However, to support pg_upgrade we need to allow an
|
||||
* empty set to be returned regardless, if the caller is willing to accept
|
||||
* it; the caller is expected to check that it's an allowed condition
|
||||
* (such as ensuring that the infomask bits set on the tuple are
|
||||
* consistent with the pg_upgrade scenario). If the caller is expecting
|
||||
* this to be called only on recently created multis, then we raise an
|
||||
* error.
|
||||
*
|
||||
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
|
||||
* seen, it implies undetected ID wraparound has occurred. This raises a
|
||||
|
@ -2123,11 +2123,11 @@ MultiXactSetNextMXact(MultiXactId nextMulti,
|
|||
* enough to contain the next value that would be created.
|
||||
*
|
||||
* We need to do this pretty early during the first startup in binary
|
||||
* upgrade mode: before StartupMultiXact() in fact, because this routine is
|
||||
* called even before that by StartupXLOG(). And we can't do it earlier
|
||||
* than at this point, because during that first call of this routine we
|
||||
* determine the MultiXactState->nextMXact value that MaybeExtendOffsetSlru
|
||||
* needs.
|
||||
* upgrade mode: before StartupMultiXact() in fact, because this routine
|
||||
* is called even before that by StartupXLOG(). And we can't do it
|
||||
* earlier than at this point, because during that first call of this
|
||||
* routine we determine the MultiXactState->nextMXact value that
|
||||
* MaybeExtendOffsetSlru needs.
|
||||
*/
|
||||
if (IsBinaryUpgrade)
|
||||
MaybeExtendOffsetSlru();
|
||||
|
@ -2202,11 +2202,11 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
|
|||
|
||||
/*
|
||||
* Determine the offset of the oldest multixact that might still be
|
||||
* referenced. Normally, we can read the offset from the multixact itself,
|
||||
* but there's an important special case: if there are no multixacts in
|
||||
* existence at all, oldest_datminmxid obviously can't point to one. It
|
||||
* will instead point to the multixact ID that will be assigned the next
|
||||
* time one is needed.
|
||||
* referenced. Normally, we can read the offset from the multixact
|
||||
* itself, but there's an important special case: if there are no
|
||||
* multixacts in existence at all, oldest_datminmxid obviously can't point
|
||||
* to one. It will instead point to the multixact ID that will be
|
||||
* assigned the next time one is needed.
|
||||
*
|
||||
* NB: oldest_dataminmxid is the oldest multixact that might still be
|
||||
* referenced from a table, unlike in DetermineSafeOldestOffset, where we
|
||||
|
@ -2520,10 +2520,9 @@ DetermineSafeOldestOffset(MultiXactId oldestMXact)
|
|||
* obviously can't point to one. It will instead point to the multixact
|
||||
* ID that will be assigned the next time one is needed.
|
||||
*
|
||||
* NB: oldestMXact should be the oldest multixact that still exists in
|
||||
* the SLRU, unlike in SetMultiXactIdLimit, where we do this same
|
||||
* computation based on the oldest value that might be referenced in a
|
||||
* table.
|
||||
* NB: oldestMXact should be the oldest multixact that still exists in the
|
||||
* SLRU, unlike in SetMultiXactIdLimit, where we do this same computation
|
||||
* based on the oldest value that might be referenced in a table.
|
||||
*/
|
||||
LWLockAcquire(MultiXactGenLock, LW_SHARED);
|
||||
if (MultiXactState->nextMXact == oldestMXact)
|
||||
|
@ -2679,9 +2678,9 @@ int
|
|||
MultiXactMemberFreezeThreshold(void)
|
||||
{
|
||||
MultiXactOffset members;
|
||||
uint32 multixacts;
|
||||
uint32 victim_multixacts;
|
||||
double fraction;
|
||||
uint32 multixacts;
|
||||
uint32 victim_multixacts;
|
||||
double fraction;
|
||||
|
||||
ReadMultiXactCounts(&multixacts, &members);
|
||||
|
||||
|
@ -2800,7 +2799,7 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
|
|||
void
|
||||
TruncateMultiXact(void)
|
||||
{
|
||||
MultiXactId oldestMXact;
|
||||
MultiXactId oldestMXact;
|
||||
MultiXactOffset oldestOffset;
|
||||
MultiXactOffset nextOffset;
|
||||
mxtruncinfo trunc;
|
||||
|
|
|
@ -39,7 +39,7 @@
|
|||
* without blocking. That way, a worker that errors out can write the whole
|
||||
* message into the queue and terminate without waiting for the user backend.
|
||||
*/
|
||||
#define PARALLEL_ERROR_QUEUE_SIZE 16384
|
||||
#define PARALLEL_ERROR_QUEUE_SIZE 16384
|
||||
|
||||
/* Magic number for parallel context TOC. */
|
||||
#define PARALLEL_MAGIC 0x50477c7c
|
||||
|
@ -71,7 +71,7 @@ typedef struct FixedParallelState
|
|||
BackendId parallel_master_backend_id;
|
||||
|
||||
/* Entrypoint for parallel workers. */
|
||||
parallel_worker_main_type entrypoint;
|
||||
parallel_worker_main_type entrypoint;
|
||||
|
||||
/* Mutex protects remaining fields. */
|
||||
slock_t mutex;
|
||||
|
@ -90,10 +90,10 @@ typedef struct FixedParallelState
|
|||
* and < the number of workers before any user code is invoked; each parallel
|
||||
* worker will get a different parallel worker number.
|
||||
*/
|
||||
int ParallelWorkerNumber = -1;
|
||||
int ParallelWorkerNumber = -1;
|
||||
|
||||
/* Is there a parallel message pending which we need to receive? */
|
||||
bool ParallelMessagePending = false;
|
||||
bool ParallelMessagePending = false;
|
||||
|
||||
/* Pointer to our fixed parallel state. */
|
||||
static FixedParallelState *MyFixedParallelState;
|
||||
|
@ -115,8 +115,8 @@ static void ParallelWorkerMain(Datum main_arg);
|
|||
ParallelContext *
|
||||
CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
|
||||
{
|
||||
MemoryContext oldcontext;
|
||||
ParallelContext *pcxt;
|
||||
MemoryContext oldcontext;
|
||||
ParallelContext *pcxt;
|
||||
|
||||
/* It is unsafe to create a parallel context if not in parallel mode. */
|
||||
Assert(IsInParallelMode());
|
||||
|
@ -159,7 +159,7 @@ CreateParallelContextForExternalFunction(char *library_name,
|
|||
char *function_name,
|
||||
int nworkers)
|
||||
{
|
||||
MemoryContext oldcontext;
|
||||
MemoryContext oldcontext;
|
||||
ParallelContext *pcxt;
|
||||
|
||||
/* We might be running in a very short-lived memory context. */
|
||||
|
@ -184,15 +184,15 @@ CreateParallelContextForExternalFunction(char *library_name,
|
|||
void
|
||||
InitializeParallelDSM(ParallelContext *pcxt)
|
||||
{
|
||||
MemoryContext oldcontext;
|
||||
Size library_len = 0;
|
||||
Size guc_len = 0;
|
||||
Size combocidlen = 0;
|
||||
Size tsnaplen = 0;
|
||||
Size asnaplen = 0;
|
||||
Size tstatelen = 0;
|
||||
Size segsize = 0;
|
||||
int i;
|
||||
MemoryContext oldcontext;
|
||||
Size library_len = 0;
|
||||
Size guc_len = 0;
|
||||
Size combocidlen = 0;
|
||||
Size tsnaplen = 0;
|
||||
Size asnaplen = 0;
|
||||
Size tstatelen = 0;
|
||||
Size segsize = 0;
|
||||
int i;
|
||||
FixedParallelState *fps;
|
||||
Snapshot transaction_snapshot = GetTransactionSnapshot();
|
||||
Snapshot active_snapshot = GetActiveSnapshot();
|
||||
|
@ -205,8 +205,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
|
|||
shm_toc_estimate_keys(&pcxt->estimator, 1);
|
||||
|
||||
/*
|
||||
* Normally, the user will have requested at least one worker process,
|
||||
* but if by chance they have not, we can skip a bunch of things here.
|
||||
* Normally, the user will have requested at least one worker process, but
|
||||
* if by chance they have not, we can skip a bunch of things here.
|
||||
*/
|
||||
if (pcxt->nworkers > 0)
|
||||
{
|
||||
|
@ -228,8 +228,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
|
|||
|
||||
/* Estimate space need for error queues. */
|
||||
StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
|
||||
PARALLEL_ERROR_QUEUE_SIZE,
|
||||
"parallel error queue size not buffer-aligned");
|
||||
PARALLEL_ERROR_QUEUE_SIZE,
|
||||
"parallel error queue size not buffer-aligned");
|
||||
shm_toc_estimate_chunk(&pcxt->estimator,
|
||||
PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
|
||||
shm_toc_estimate_keys(&pcxt->estimator, 1);
|
||||
|
@ -251,9 +251,9 @@ InitializeParallelDSM(ParallelContext *pcxt)
|
|||
* memory segment; instead, just use backend-private memory.
|
||||
*
|
||||
* Also, if we can't create a dynamic shared memory segment because the
|
||||
* maximum number of segments have already been created, then fall back
|
||||
* to backend-private memory, and plan not to use any workers. We hope
|
||||
* this won't happen very often, but it's better to abandon the use of
|
||||
* maximum number of segments have already been created, then fall back to
|
||||
* backend-private memory, and plan not to use any workers. We hope this
|
||||
* won't happen very often, but it's better to abandon the use of
|
||||
* parallelism than to fail outright.
|
||||
*/
|
||||
segsize = shm_toc_estimate(&pcxt->estimator);
|
||||
|
@ -290,13 +290,13 @@ InitializeParallelDSM(ParallelContext *pcxt)
|
|||
/* We can skip the rest of this if we're not budgeting for any workers. */
|
||||
if (pcxt->nworkers > 0)
|
||||
{
|
||||
char *libraryspace;
|
||||
char *gucspace;
|
||||
char *combocidspace;
|
||||
char *tsnapspace;
|
||||
char *asnapspace;
|
||||
char *tstatespace;
|
||||
char *error_queue_space;
|
||||
char *libraryspace;
|
||||
char *gucspace;
|
||||
char *combocidspace;
|
||||
char *tsnapspace;
|
||||
char *asnapspace;
|
||||
char *tstatespace;
|
||||
char *error_queue_space;
|
||||
|
||||
/* Serialize shared libraries we have loaded. */
|
||||
libraryspace = shm_toc_allocate(pcxt->toc, library_len);
|
||||
|
@@ -338,12 +338,12 @@ InitializeParallelDSM(ParallelContext *pcxt)
         * should be transmitted via separate (possibly larger?) queues.
         */
        error_queue_space =
            shm_toc_allocate(pcxt->toc,
                             PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
            shm_toc_allocate(pcxt->toc,
                             PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
        for (i = 0; i < pcxt->nworkers; ++i)
        {
            char       *start;
            shm_mq     *mq;
            char       *start;
            shm_mq     *mq;

            start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
            mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
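The offset arithmetic above is the whole trick: one flat allocation carved into fixed-size, non-overlapping per-worker queues. A standalone sketch of the same carving, with plain malloc standing in for shm_toc_allocate (an assumption made only so the example compiles on its own):

    #include <stdlib.h>
    #include <string.h>

    #define MY_QUEUE_SIZE 16384         /* mirrors PARALLEL_ERROR_QUEUE_SIZE */

    int
    main(void)
    {
        int         nworkers = 4;
        char       *space = malloc((size_t) MY_QUEUE_SIZE * nworkers);
        int         i;

        if (space == NULL)
            return 1;
        for (i = 0; i < nworkers; ++i)
        {
            char       *start = space + (size_t) i * MY_QUEUE_SIZE;

            /* each worker gets its own 16 kB slice to initialize as a queue */
            memset(start, 0, MY_QUEUE_SIZE);
        }
        free(space);
        return 0;
    }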
@ -355,8 +355,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
|
|||
/* Serialize extension entrypoint information. */
|
||||
if (pcxt->library_name != NULL)
|
||||
{
|
||||
Size lnamelen = strlen(pcxt->library_name);
|
||||
char *extensionstate;
|
||||
Size lnamelen = strlen(pcxt->library_name);
|
||||
char *extensionstate;
|
||||
|
||||
extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
|
||||
+ strlen(pcxt->function_name) + 2);
|
||||
|
@ -377,10 +377,10 @@ InitializeParallelDSM(ParallelContext *pcxt)
|
|||
void
|
||||
LaunchParallelWorkers(ParallelContext *pcxt)
|
||||
{
|
||||
MemoryContext oldcontext;
|
||||
BackgroundWorker worker;
|
||||
int i;
|
||||
bool any_registrations_failed = false;
|
||||
MemoryContext oldcontext;
|
||||
BackgroundWorker worker;
|
||||
int i;
|
||||
bool any_registrations_failed = false;
|
||||
|
||||
/* Skip this if we have no workers. */
|
||||
if (pcxt->nworkers == 0)
|
||||
|
@ -408,8 +408,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
|
|||
*
|
||||
* The caller must be able to tolerate ending up with fewer workers than
|
||||
* expected, so there is no need to throw an error here if registration
|
||||
* fails. It wouldn't help much anyway, because registering the worker
|
||||
* in no way guarantees that it will start up and initialize successfully.
|
||||
* fails. It wouldn't help much anyway, because registering the worker in
|
||||
* no way guarantees that it will start up and initialize successfully.
|
||||
*/
|
||||
for (i = 0; i < pcxt->nworkers; ++i)
|
||||
{
|
||||
|
@ -421,8 +421,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
|
|||
else
|
||||
{
|
||||
/*
|
||||
* If we weren't able to register the worker, then we've bumped
|
||||
* up against the max_worker_processes limit, and future
|
||||
* If we weren't able to register the worker, then we've bumped up
|
||||
* against the max_worker_processes limit, and future
|
||||
* registrations will probably fail too, so arrange to skip them.
|
||||
* But we still have to execute this code for the remaining slots
|
||||
* to make sure that we forget about the error queues we budgeted
|
||||
|
@ -455,13 +455,13 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
|
|||
{
|
||||
for (;;)
|
||||
{
|
||||
bool anyone_alive = false;
|
||||
int i;
|
||||
bool anyone_alive = false;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* This will process any parallel messages that are pending, which
|
||||
* may change the outcome of the loop that follows. It may also
|
||||
* throw an error propagated from a worker.
|
||||
* This will process any parallel messages that are pending, which may
|
||||
* change the outcome of the loop that follows. It may also throw an
|
||||
* error propagated from a worker.
|
||||
*/
|
||||
CHECK_FOR_INTERRUPTS();
|
||||
|
||||
|
@ -502,7 +502,7 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
|
|||
void
|
||||
DestroyParallelContext(ParallelContext *pcxt)
|
||||
{
|
||||
int i;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Be careful about order of operations here! We remove the parallel
|
||||
|
@ -548,7 +548,7 @@ DestroyParallelContext(ParallelContext *pcxt)
|
|||
/* Wait until the workers actually die. */
|
||||
for (i = 0; i < pcxt->nworkers; ++i)
|
||||
{
|
||||
BgwHandleStatus status;
|
||||
BgwHandleStatus status;
|
||||
|
||||
if (pcxt->worker[i].bgwhandle == NULL)
|
||||
continue;
|
||||
|
@ -626,9 +626,9 @@ HandleParallelMessages(void)
|
|||
dlist_foreach(iter, &pcxt_list)
|
||||
{
|
||||
ParallelContext *pcxt;
|
||||
int i;
|
||||
Size nbytes;
|
||||
void *data;
|
||||
int i;
|
||||
Size nbytes;
|
||||
void *data;
|
||||
|
||||
pcxt = dlist_container(ParallelContext, node, iter.cur);
|
||||
if (pcxt->worker == NULL)
|
||||
|
@ -637,14 +637,14 @@ HandleParallelMessages(void)
|
|||
for (i = 0; i < pcxt->nworkers; ++i)
|
||||
{
|
||||
/*
|
||||
* Read as many messages as we can from each worker, but stop
|
||||
* when either (1) the error queue goes away, which can happen if
|
||||
* we receive a Terminate message from the worker; or (2) no more
|
||||
* Read as many messages as we can from each worker, but stop when
|
||||
* either (1) the error queue goes away, which can happen if we
|
||||
* receive a Terminate message from the worker; or (2) no more
|
||||
* messages can be read from the worker without blocking.
|
||||
*/
|
||||
while (pcxt->worker[i].error_mqh != NULL)
|
||||
{
|
||||
shm_mq_result res;
|
||||
shm_mq_result res;
|
||||
|
||||
res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
|
||||
&data, true);
|
||||
|
@ -652,7 +652,7 @@ HandleParallelMessages(void)
|
|||
break;
|
||||
else if (res == SHM_MQ_SUCCESS)
|
||||
{
|
||||
StringInfoData msg;
|
||||
StringInfoData msg;
|
||||
|
||||
initStringInfo(&msg);
|
||||
appendBinaryStringInfo(&msg, data, nbytes);
|
||||
|
@ -661,7 +661,7 @@ HandleParallelMessages(void)
|
|||
}
|
||||
else
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INTERNAL_ERROR), /* XXX: wrong errcode? */
|
||||
(errcode(ERRCODE_INTERNAL_ERROR), /* XXX: wrong errcode? */
|
||||
errmsg("lost connection to parallel worker")));
|
||||
|
||||
/* This might make the error queue go away. */
|
||||
|
@ -677,23 +677,24 @@ HandleParallelMessages(void)
|
|||
static void
|
||||
HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
|
||||
{
|
||||
char msgtype;
|
||||
char msgtype;
|
||||
|
||||
msgtype = pq_getmsgbyte(msg);
|
||||
|
||||
switch (msgtype)
|
||||
{
|
||||
case 'K': /* BackendKeyData */
|
||||
case 'K': /* BackendKeyData */
|
||||
{
|
||||
int32 pid = pq_getmsgint(msg, 4);
|
||||
int32 pid = pq_getmsgint(msg, 4);
|
||||
|
||||
(void) pq_getmsgint(msg, 4); /* discard cancel key */
|
||||
(void) pq_getmsgend(msg);
|
||||
pcxt->worker[i].pid = pid;
|
||||
break;
|
||||
}
|
||||
|
||||
case 'E': /* ErrorResponse */
|
||||
case 'N': /* NoticeResponse */
|
||||
case 'E': /* ErrorResponse */
|
||||
case 'N': /* NoticeResponse */
|
||||
{
|
||||
ErrorData edata;
|
||||
ErrorContextCallback errctx;
|
||||
|
@ -725,14 +726,14 @@ HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
|
|||
break;
|
||||
}
|
||||
|
||||
case 'A': /* NotifyResponse */
|
||||
case 'A': /* NotifyResponse */
|
||||
{
|
||||
/* Propagate NotifyResponse. */
|
||||
pq_putmessage(msg->data[0], &msg->data[1], msg->len - 1);
|
||||
break;
|
||||
}
|
||||
|
||||
case 'X': /* Terminate, indicating clean exit */
|
||||
case 'X': /* Terminate, indicating clean exit */
|
||||
{
|
||||
pfree(pcxt->worker[i].bgwhandle);
|
||||
pfree(pcxt->worker[i].error_mqh);
|
||||
|
@ -797,18 +798,18 @@ static void
|
|||
ParallelWorkerMain(Datum main_arg)
|
||||
{
|
||||
dsm_segment *seg;
|
||||
shm_toc *toc;
|
||||
shm_toc *toc;
|
||||
FixedParallelState *fps;
|
||||
char *error_queue_space;
|
||||
shm_mq *mq;
|
||||
char *error_queue_space;
|
||||
shm_mq *mq;
|
||||
shm_mq_handle *mqh;
|
||||
char *libraryspace;
|
||||
char *gucspace;
|
||||
char *combocidspace;
|
||||
char *tsnapspace;
|
||||
char *asnapspace;
|
||||
char *tstatespace;
|
||||
StringInfoData msgbuf;
|
||||
char *libraryspace;
|
||||
char *gucspace;
|
||||
char *combocidspace;
|
||||
char *tsnapspace;
|
||||
char *asnapspace;
|
||||
char *tstatespace;
|
||||
StringInfoData msgbuf;
|
||||
|
||||
/* Establish signal handlers. */
|
||||
pqsignal(SIGTERM, die);
|
||||
|
@ -824,8 +825,8 @@ ParallelWorkerMain(Datum main_arg)
|
|||
ALLOCSET_DEFAULT_MAXSIZE);
|
||||
|
||||
/*
|
||||
* Now that we have a resource owner, we can attach to the dynamic
|
||||
* shared memory segment and read the table of contents.
|
||||
* Now that we have a resource owner, we can attach to the dynamic shared
|
||||
* memory segment and read the table of contents.
|
||||
*/
|
||||
seg = dsm_attach(DatumGetUInt32(main_arg));
|
||||
if (seg == NULL)
|
||||
|
@ -836,7 +837,7 @@ ParallelWorkerMain(Datum main_arg)
|
|||
if (toc == NULL)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
|
||||
errmsg("bad magic number in dynamic shared memory segment")));
|
||||
errmsg("bad magic number in dynamic shared memory segment")));
|
||||
|
||||
/* Determine and set our worker number. */
|
||||
fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
|
||||
|
@ -860,7 +861,7 @@ ParallelWorkerMain(Datum main_arg)
|
|||
*/
|
||||
error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
|
||||
mq = (shm_mq *) (error_queue_space +
|
||||
ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
|
||||
ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
|
||||
shm_mq_set_sender(mq, MyProc);
|
||||
mqh = shm_mq_attach(mq, seg, NULL);
|
||||
pq_redirect_to_shm_mq(mq, mqh);
|
||||
|
@ -870,9 +871,9 @@ ParallelWorkerMain(Datum main_arg)
|
|||
/*
|
||||
* Send a BackendKeyData message to the process that initiated parallelism
|
||||
* so that it has access to our PID before it receives any other messages
|
||||
* from us. Our cancel key is sent, too, since that's the way the protocol
|
||||
* message is defined, but it won't actually be used for anything in this
|
||||
* case.
|
||||
* from us. Our cancel key is sent, too, since that's the way the
|
||||
* protocol message is defined, but it won't actually be used for anything
|
||||
* in this case.
|
||||
*/
|
||||
pq_beginmessage(&msgbuf, 'K');
|
||||
pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
|
||||
|
@ -880,13 +881,13 @@ ParallelWorkerMain(Datum main_arg)
|
|||
pq_endmessage(&msgbuf);
|
||||
|
||||
/*
|
||||
* Hooray! Primary initialization is complete. Now, we need to set up
|
||||
* our backend-local state to match the original backend.
|
||||
* Hooray! Primary initialization is complete. Now, we need to set up our
|
||||
* backend-local state to match the original backend.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Load libraries that were loaded by original backend. We want to do this
|
||||
* before restoring GUCs, because the libraries might define custom
|
||||
* Load libraries that were loaded by original backend. We want to do
|
||||
* this before restoring GUCs, because the libraries might define custom
|
||||
* variables.
|
||||
*/
|
||||
libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
|
||||
|
@ -928,7 +929,8 @@ ParallelWorkerMain(Datum main_arg)
|
|||
SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
|
||||
|
||||
/*
|
||||
* We've initialized all of our state now; nothing should change hereafter.
|
||||
* We've initialized all of our state now; nothing should change
|
||||
* hereafter.
|
||||
*/
|
||||
EnterParallelMode();
|
||||
|
||||
|
@ -965,9 +967,9 @@ ParallelWorkerMain(Datum main_arg)
|
|||
static void
|
||||
ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
|
||||
{
|
||||
char *extensionstate;
|
||||
char *library_name;
|
||||
char *function_name;
|
||||
char *extensionstate;
|
||||
char *library_name;
|
||||
char *function_name;
|
||||
parallel_worker_main_type entrypt;
|
||||
|
||||
extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
|
||||
|
@ -988,7 +990,7 @@ ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
|
|||
static void
|
||||
ParallelErrorContext(void *arg)
|
||||
{
|
||||
errcontext("parallel worker, pid %d", * (int32 *) arg);
|
||||
errcontext("parallel worker, pid %d", *(int32 *) arg);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -117,7 +117,7 @@ typedef struct GlobalTransactionData
|
|||
TimestampTz prepared_at; /* time of preparation */
|
||||
XLogRecPtr prepare_lsn; /* XLOG offset of prepare record */
|
||||
Oid owner; /* ID of user that executed the xact */
|
||||
BackendId locking_backend; /* backend currently working on the xact */
|
||||
BackendId locking_backend; /* backend currently working on the xact */
|
||||
bool valid; /* TRUE if PGPROC entry is in proc array */
|
||||
char gid[GIDSIZE]; /* The GID assigned to the prepared xact */
|
||||
} GlobalTransactionData;
|
||||
|
@ -256,24 +256,24 @@ AtAbort_Twophase(void)
|
|||
return;
|
||||
|
||||
/*
|
||||
* What to do with the locked global transaction entry? If we were in
|
||||
* the process of preparing the transaction, but haven't written the WAL
|
||||
* What to do with the locked global transaction entry? If we were in the
|
||||
* process of preparing the transaction, but haven't written the WAL
|
||||
* record and state file yet, the transaction must not be considered as
|
||||
* prepared. Likewise, if we are in the process of finishing an
|
||||
* already-prepared transaction, and fail after having already written
|
||||
* the 2nd phase commit or rollback record to the WAL, the transaction
|
||||
* should not be considered as prepared anymore. In those cases, just
|
||||
* remove the entry from shared memory.
|
||||
* already-prepared transaction, and fail after having already written the
|
||||
* 2nd phase commit or rollback record to the WAL, the transaction should
|
||||
* not be considered as prepared anymore. In those cases, just remove the
|
||||
* entry from shared memory.
|
||||
*
|
||||
* Otherwise, the entry must be left in place so that the transaction
|
||||
* can be finished later, so just unlock it.
|
||||
* Otherwise, the entry must be left in place so that the transaction can
|
||||
* be finished later, so just unlock it.
|
||||
*
|
||||
* If we abort during prepare, after having written the WAL record, we
|
||||
* might not have transferred all locks and other state to the prepared
|
||||
* transaction yet. Likewise, if we abort during commit or rollback,
|
||||
* after having written the WAL record, we might not have released
|
||||
* all the resources held by the transaction yet. In those cases, the
|
||||
* in-memory state can be wrong, but it's too late to back out.
|
||||
* after having written the WAL record, we might not have released all the
|
||||
* resources held by the transaction yet. In those cases, the in-memory
|
||||
* state can be wrong, but it's too late to back out.
|
||||
*/
|
||||
if (!MyLockedGxact->valid)
|
||||
{
|
||||
|
@ -408,8 +408,8 @@ MarkAsPreparing(TransactionId xid, const char *gid,
|
|||
TwoPhaseState->prepXacts[TwoPhaseState->numPrepXacts++] = gxact;
|
||||
|
||||
/*
|
||||
* Remember that we have this GlobalTransaction entry locked for us.
|
||||
* If we abort after this, we must release it.
|
||||
* Remember that we have this GlobalTransaction entry locked for us. If we
|
||||
* abort after this, we must release it.
|
||||
*/
|
||||
MyLockedGxact = gxact;
|
||||
|
||||
|
@ -499,8 +499,8 @@ LockGXact(const char *gid, Oid user)
|
|||
if (gxact->locking_backend != InvalidBackendId)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
|
||||
errmsg("prepared transaction with identifier \"%s\" is busy",
|
||||
gid)));
|
||||
errmsg("prepared transaction with identifier \"%s\" is busy",
|
||||
gid)));
|
||||
|
||||
if (user != gxact->owner && !superuser_arg(user))
|
||||
ereport(ERROR,
|
||||
|
@ -1423,8 +1423,8 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
|
|||
|
||||
/*
|
||||
* In case we fail while running the callbacks, mark the gxact invalid so
|
||||
* no one else will try to commit/rollback, and so it will be recycled
|
||||
* if we fail after this point. It is still locked by our backend so it
|
||||
* no one else will try to commit/rollback, and so it will be recycled if
|
||||
* we fail after this point. It is still locked by our backend so it
|
||||
* won't go away yet.
|
||||
*
|
||||
* (We assume it's safe to do this without taking TwoPhaseStateLock.)
|
||||
|
@ -2055,8 +2055,9 @@ RecoverPreparedTransactions(void)
|
|||
StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
|
||||
|
||||
/*
|
||||
* We're done with recovering this transaction. Clear MyLockedGxact,
|
||||
* like we do in PrepareTransaction() during normal operation.
|
||||
* We're done with recovering this transaction. Clear
|
||||
* MyLockedGxact, like we do in PrepareTransaction() during normal
|
||||
* operation.
|
||||
*/
|
||||
PostPrepare_Twophase();
|
||||
|
||||
|
|
|
@ -102,9 +102,9 @@ int synchronous_commit = SYNCHRONOUS_COMMIT_ON;
|
|||
* The XIDs are stored sorted in numerical order (not logical order) to make
|
||||
* lookups as fast as possible.
|
||||
*/
|
||||
TransactionId XactTopTransactionId = InvalidTransactionId;
|
||||
int nParallelCurrentXids = 0;
|
||||
TransactionId *ParallelCurrentXids;
|
||||
TransactionId XactTopTransactionId = InvalidTransactionId;
|
||||
int nParallelCurrentXids = 0;
|
||||
TransactionId *ParallelCurrentXids;
|
||||
|
||||
/*
|
||||
* MyXactAccessedTempRel is set when a temporary relation is accessed.
|
||||
|
@ -142,7 +142,7 @@ typedef enum TBlockState
|
|||
/* transaction block states */
TBLOCK_BEGIN, /* starting transaction block */
TBLOCK_INPROGRESS, /* live transaction */
TBLOCK_PARALLEL_INPROGRESS, /* live transaction inside parallel worker */
TBLOCK_PARALLEL_INPROGRESS, /* live transaction inside parallel worker */
TBLOCK_END, /* COMMIT received */
TBLOCK_ABORT, /* failed xact, awaiting ROLLBACK */
TBLOCK_ABORT_END, /* failed xact, ROLLBACK received */
@@ -184,7 +184,7 @@ typedef struct TransactionStateData
bool prevXactReadOnly; /* entry-time xact r/o state */
bool startedInRecovery; /* did we start in recovery? */
bool didLogXid; /* has xid been included in WAL record? */
int parallelModeLevel; /* Enter/ExitParallelMode counter */
int parallelModeLevel; /* Enter/ExitParallelMode counter */
struct TransactionStateData *parent; /* back link to parent */
} TransactionStateData;

@@ -494,8 +494,8 @@ AssignTransactionId(TransactionState s)
Assert(s->state == TRANS_INPROGRESS);

/*
* Workers synchronize transaction state at the beginning of each
* parallel operation, so we can't account for new XIDs at this point.
* Workers synchronize transaction state at the beginning of each parallel
* operation, so we can't account for new XIDs at this point.
*/
if (IsInParallelMode())
elog(ERROR, "cannot assign XIDs during a parallel operation");
@@ -788,10 +788,10 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
return false;

/*
* In parallel workers, the XIDs we must consider as current are stored
* in ParallelCurrentXids rather than the transaction-state stack. Note
* that the XIDs in this array are sorted numerically rather than
* according to transactionIdPrecedes order.
* In parallel workers, the XIDs we must consider as current are stored in
* ParallelCurrentXids rather than the transaction-state stack. Note that
* the XIDs in this array are sorted numerically rather than according to
* transactionIdPrecedes order.
*/
if (nParallelCurrentXids > 0)
{
@@ -1204,7 +1204,7 @@ RecordTransactionCommit(void)
nchildren, children, nrels, rels,
nmsgs, invalMessages,
RelcacheInitFileInval, forceSyncCommit,
InvalidTransactionId /* plain commit */);
InvalidTransactionId /* plain commit */ );

/*
* Record plain commit ts if not replaying remote actions, or if no
@@ -1505,7 +1505,7 @@ RecordTransactionAbort(bool isSubXact)
RelFileNode *rels;
int nchildren;
TransactionId *children;
TimestampTz xact_time;
TimestampTz xact_time;

/*
* If we haven't been assigned an XID, nobody will care whether we aborted
@@ -2316,8 +2316,8 @@ PrepareTransaction(void)

/*
* In normal commit-processing, this is all non-critical post-transaction
* cleanup. When the transaction is prepared, however, it's important that
* the locks and other per-backend resources are transferred to the
* cleanup. When the transaction is prepared, however, it's important
* that the locks and other per-backend resources are transferred to the
* prepared transaction's PGPROC entry. Note that if an error is raised
* here, it's too late to abort the transaction. XXX: This probably should
* be in a critical section, to force a PANIC if any of this fails, but
@@ -2358,9 +2358,8 @@ PrepareTransaction(void)

/*
* Allow another backend to finish the transaction. After
* PostPrepare_Twophase(), the transaction is completely detached from
* our backend. The rest is just non-critical cleanup of backend-local
* state.
* PostPrepare_Twophase(), the transaction is completely detached from our
* backend. The rest is just non-critical cleanup of backend-local state.
*/
PostPrepare_Twophase();

@@ -2417,7 +2416,7 @@ AbortTransaction(void)
{
TransactionState s = CurrentTransactionState;
TransactionId latestXid;
bool is_parallel_worker;
bool is_parallel_worker;

/* Prevent cancel/die interrupt while cleaning up */
HOLD_INTERRUPTS();
@@ -2520,9 +2519,9 @@ AbortTransaction(void)
latestXid = InvalidTransactionId;

/*
* Since the parallel master won't get our value of XactLastRecEnd in this
* case, we nudge WAL-writer ourselves in this case. See related comments in
* RecordTransactionAbort for why this matters.
* Since the parallel master won't get our value of XactLastRecEnd in
* this case, we nudge WAL-writer ourselves in this case. See related
* comments in RecordTransactionAbort for why this matters.
*/
XLogSetAsyncXactLSN(XactLastRecEnd);
}
@@ -3720,7 +3719,7 @@ DefineSavepoint(char *name)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
errmsg("cannot define savepoints during a parallel operation")));
errmsg("cannot define savepoints during a parallel operation")));

switch (s->blockState)
{
@@ -3787,7 +3786,7 @@ ReleaseSavepoint(List *options)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
errmsg("cannot release savepoints during a parallel operation")));
errmsg("cannot release savepoints during a parallel operation")));

switch (s->blockState)
{
@@ -3900,7 +3899,7 @@ RollbackToSavepoint(List *options)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
errmsg("cannot rollback to savepoints during a parallel operation")));
errmsg("cannot rollback to savepoints during a parallel operation")));

switch (s->blockState)
{
@@ -4017,17 +4016,18 @@ BeginInternalSubTransaction(char *name)

/*
* Workers synchronize transaction state at the beginning of each parallel
* operation, so we can't account for new subtransactions after that point.
* We might be able to make an exception for the type of subtransaction
* established by this function, which is typically used in contexts where
* we're going to release or roll back the subtransaction before proceeding
* further, so that no enduring change to the transaction state occurs.
* For now, however, we prohibit this case along with all the others.
* operation, so we can't account for new subtransactions after that
* point. We might be able to make an exception for the type of
* subtransaction established by this function, which is typically used in
* contexts where we're going to release or roll back the subtransaction
* before proceeding further, so that no enduring change to the
* transaction state occurs. For now, however, we prohibit this case along
* with all the others.
*/
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
errmsg("cannot start subtransactions during a parallel operation")));
errmsg("cannot start subtransactions during a parallel operation")));

switch (s->blockState)
{
@@ -4094,7 +4094,7 @@ ReleaseCurrentSubTransaction(void)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
errmsg("cannot commit subtransactions during a parallel operation")));
errmsg("cannot commit subtransactions during a parallel operation")));

if (s->blockState != TBLOCK_SUBINPROGRESS)
elog(ERROR, "ReleaseCurrentSubTransaction: unexpected state %s",
@@ -4773,7 +4773,8 @@ Size
EstimateTransactionStateSpace(void)
|
||||
{
|
||||
TransactionState s;
|
||||
Size nxids = 5; /* iso level, deferrable, top & current XID, XID count */
|
||||
Size nxids = 5; /* iso level, deferrable, top & current XID,
|
||||
* XID count */
|
||||
|
||||
for (s = CurrentTransactionState; s != NULL; s = s->parent)
|
||||
{
|
||||
|
@ -4804,8 +4805,8 @@ void
|
|||
SerializeTransactionState(Size maxsize, char *start_address)
|
||||
{
|
||||
TransactionState s;
|
||||
Size nxids = 0;
|
||||
Size i = 0;
|
||||
Size nxids = 0;
|
||||
Size i = 0;
|
||||
TransactionId *workspace;
|
||||
TransactionId *result = (TransactionId *) start_address;
|
||||
|
||||
|
@ -4830,8 +4831,8 @@ SerializeTransactionState(Size maxsize, char *start_address)
|
|||
}
|
||||
|
||||
/*
|
||||
* OK, we need to generate a sorted list of XIDs that our workers
|
||||
* should view as current. First, figure out how many there are.
|
||||
* OK, we need to generate a sorted list of XIDs that our workers should
|
||||
* view as current. First, figure out how many there are.
|
||||
*/
|
||||
for (s = CurrentTransactionState; s != NULL; s = s->parent)
|
||||
{
|
||||
|
@ -5060,22 +5061,22 @@ xactGetCommittedChildren(TransactionId **ptr)
|
|||
*/
|
||||
XLogRecPtr
|
||||
XactLogCommitRecord(TimestampTz commit_time,
|
||||
int nsubxacts, TransactionId *subxacts,
|
||||
int nrels, RelFileNode *rels,
|
||||
int nmsgs, SharedInvalidationMessage *msgs,
|
||||
bool relcacheInval, bool forceSync,
|
||||
TransactionId twophase_xid)
|
||||
int nsubxacts, TransactionId *subxacts,
|
||||
int nrels, RelFileNode *rels,
|
||||
int nmsgs, SharedInvalidationMessage *msgs,
|
||||
bool relcacheInval, bool forceSync,
|
||||
TransactionId twophase_xid)
|
||||
{
|
||||
xl_xact_commit xlrec;
|
||||
xl_xact_xinfo xl_xinfo;
|
||||
xl_xact_dbinfo xl_dbinfo;
|
||||
xl_xact_subxacts xl_subxacts;
|
||||
xl_xact_commit xlrec;
|
||||
xl_xact_xinfo xl_xinfo;
|
||||
xl_xact_dbinfo xl_dbinfo;
|
||||
xl_xact_subxacts xl_subxacts;
|
||||
xl_xact_relfilenodes xl_relfilenodes;
|
||||
xl_xact_invals xl_invals;
|
||||
xl_xact_twophase xl_twophase;
|
||||
xl_xact_origin xl_origin;
|
||||
xl_xact_invals xl_invals;
|
||||
xl_xact_twophase xl_twophase;
|
||||
xl_xact_origin xl_origin;
|
||||
|
||||
uint8 info;
|
||||
uint8 info;
|
||||
|
||||
Assert(CritSectionCount > 0);
|
||||
|
||||
|
@ -5198,17 +5199,17 @@ XactLogCommitRecord(TimestampTz commit_time,
|
|||
*/
|
||||
XLogRecPtr
|
||||
XactLogAbortRecord(TimestampTz abort_time,
|
||||
int nsubxacts, TransactionId *subxacts,
|
||||
int nrels, RelFileNode *rels,
|
||||
TransactionId twophase_xid)
|
||||
int nsubxacts, TransactionId *subxacts,
|
||||
int nrels, RelFileNode *rels,
|
||||
TransactionId twophase_xid)
|
||||
{
|
||||
xl_xact_abort xlrec;
|
||||
xl_xact_xinfo xl_xinfo;
|
||||
xl_xact_subxacts xl_subxacts;
|
||||
xl_xact_abort xlrec;
|
||||
xl_xact_xinfo xl_xinfo;
|
||||
xl_xact_subxacts xl_subxacts;
|
||||
xl_xact_relfilenodes xl_relfilenodes;
|
||||
xl_xact_twophase xl_twophase;
|
||||
xl_xact_twophase xl_twophase;
|
||||
|
||||
uint8 info;
|
||||
uint8 info;
|
||||
|
||||
Assert(CritSectionCount > 0);
|
||||
|
||||
|
@ -5289,7 +5290,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
|
|||
{
|
||||
TransactionId max_xid;
|
||||
int i;
|
||||
TimestampTz commit_time;
|
||||
TimestampTz commit_time;
|
||||
|
||||
max_xid = TransactionIdLatest(xid, parsed->nsubxacts, parsed->subxacts);
|
||||
|
||||
|
@ -5351,13 +5352,13 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
|
|||
* recovered. It's unlikely but it's good to be safe.
|
||||
*/
|
||||
TransactionIdAsyncCommitTree(
|
||||
xid, parsed->nsubxacts, parsed->subxacts, lsn);
|
||||
xid, parsed->nsubxacts, parsed->subxacts, lsn);
|
||||
|
||||
/*
|
||||
* We must mark clog before we update the ProcArray.
|
||||
*/
|
||||
ExpireTreeKnownAssignedTransactionIds(
|
||||
xid, parsed->nsubxacts, parsed->subxacts, max_xid);
|
||||
xid, parsed->nsubxacts, parsed->subxacts, max_xid);
|
||||
|
||||
/*
|
||||
* Send any cache invalidations attached to the commit. We must
|
||||
|
@ -5365,9 +5366,9 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
|
|||
* occurs in CommitTransaction().
|
||||
*/
|
||||
ProcessCommittedInvalidationMessages(
|
||||
parsed->msgs, parsed->nmsgs,
|
||||
XactCompletionRelcacheInitFileInval(parsed->xinfo),
|
||||
parsed->dbId, parsed->tsId);
|
||||
parsed->msgs, parsed->nmsgs,
|
||||
XactCompletionRelcacheInitFileInval(parsed->xinfo),
|
||||
parsed->dbId, parsed->tsId);
|
||||
|
||||
/*
|
||||
* Release locks, if any. We do this for both two phase and normal one
|
||||
|
@ -5383,7 +5384,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
|
|||
{
|
||||
/* recover apply progress */
|
||||
replorigin_advance(origin_id, parsed->origin_lsn, lsn,
|
||||
false /* backward */, false /* WAL */);
|
||||
false /* backward */ , false /* WAL */ );
|
||||
}
|
||||
|
||||
/* Make sure files supposed to be dropped are dropped */
|
||||
|
@ -5447,8 +5448,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
|
|||
static void
|
||||
xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid)
|
||||
{
|
||||
int i;
|
||||
TransactionId max_xid;
|
||||
int i;
|
||||
TransactionId max_xid;
|
||||
|
||||
/*
|
||||
* Make sure nextXid is beyond any XID mentioned in the record.
|
||||
|
@ -5495,7 +5496,7 @@ xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid)
|
|||
* We must update the ProcArray after we have marked clog.
|
||||
*/
|
||||
ExpireTreeKnownAssignedTransactionIds(
|
||||
xid, parsed->nsubxacts, parsed->subxacts, max_xid);
|
||||
xid, parsed->nsubxacts, parsed->subxacts, max_xid);
|
||||
|
||||
/*
|
||||
* There are no flat files that need updating, nor invalidation
|
||||
|
@ -5557,7 +5558,7 @@ xact_redo(XLogReaderState *record)
|
|||
xl_xact_parsed_abort parsed;
|
||||
|
||||
ParseAbortRecord(XLogRecGetInfo(record), xlrec,
|
||||
&parsed);
|
||||
&parsed);
|
||||
|
||||
if (info == XLOG_XACT_ABORT)
|
||||
{
|
||||
|
|
|
@ -81,8 +81,8 @@ extern uint32 bootstrap_data_checksum_version;
|
|||
|
||||
|
||||
/* User-settable parameters */
|
||||
int max_wal_size = 64; /* 1 GB */
|
||||
int min_wal_size = 5; /* 80 MB */
|
||||
int max_wal_size = 64; /* 1 GB */
|
||||
int min_wal_size = 5; /* 80 MB */
|
||||
int wal_keep_segments = 0;
|
||||
int XLOGbuffers = -1;
|
||||
int XLogArchiveTimeout = 0;
|
||||
|
@ -951,14 +951,14 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
|
|||
/*
|
||||
* Check to see if my copy of RedoRecPtr or doPageWrites is out of date.
|
||||
* If so, may have to go back and have the caller recompute everything.
|
||||
* This can only happen just after a checkpoint, so it's better to be
|
||||
* slow in this case and fast otherwise.
|
||||
* This can only happen just after a checkpoint, so it's better to be slow
|
||||
* in this case and fast otherwise.
|
||||
*
|
||||
* If we aren't doing full-page writes then RedoRecPtr doesn't actually
|
||||
* affect the contents of the XLOG record, so we'll update our local copy
|
||||
* but not force a recomputation. (If doPageWrites was just turned off,
|
||||
* we could recompute the record without full pages, but we choose not
|
||||
* to bother.)
|
||||
* we could recompute the record without full pages, but we choose not to
|
||||
* bother.)
|
||||
*/
|
||||
if (RedoRecPtr != Insert->RedoRecPtr)
|
||||
{
|
||||
|
@ -970,8 +970,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
|
|||
if (fpw_lsn != InvalidXLogRecPtr && fpw_lsn <= RedoRecPtr && doPageWrites)
|
||||
{
|
||||
/*
|
||||
* Oops, some buffer now needs to be backed up that the caller
|
||||
* didn't back up. Start over.
|
||||
* Oops, some buffer now needs to be backed up that the caller didn't
|
||||
* back up. Start over.
|
||||
*/
|
||||
WALInsertLockRelease();
|
||||
END_CRIT_SECTION();
|
||||
|
@ -1100,8 +1100,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
|
|||
{
|
||||
appendStringInfo(&buf, "error decoding record: out of memory");
|
||||
}
|
||||
else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
|
||||
&errormsg))
|
||||
else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
|
||||
&errormsg))
|
||||
{
|
||||
appendStringInfo(&buf, "error decoding record: %s",
|
||||
errormsg ? errormsg : "no error message");
|
||||
|
@ -1932,11 +1932,11 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
|
|||
/*
|
||||
* Fill the new page's header
|
||||
*/
|
||||
NewPage ->xlp_magic = XLOG_PAGE_MAGIC;
|
||||
NewPage->xlp_magic = XLOG_PAGE_MAGIC;
|
||||
|
||||
/* NewPage->xlp_info = 0; */ /* done by memset */
|
||||
NewPage ->xlp_tli = ThisTimeLineID;
|
||||
NewPage ->xlp_pageaddr = NewPageBeginPtr;
|
||||
NewPage->xlp_tli = ThisTimeLineID;
|
||||
NewPage->xlp_pageaddr = NewPageBeginPtr;
|
||||
|
||||
/* NewPage->xlp_rem_len = 0; */ /* done by memset */
|
||||
|
||||
|
@ -1954,7 +1954,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
|
|||
* compress a few records.
|
||||
*/
|
||||
if (!Insert->forcePageWrites)
|
||||
NewPage ->xlp_info |= XLP_BKP_REMOVABLE;
|
||||
NewPage->xlp_info |= XLP_BKP_REMOVABLE;
|
||||
|
||||
/*
|
||||
* If first page of an XLOG segment file, make it a long header.
|
||||
|
@ -1966,7 +1966,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
|
|||
NewLongPage->xlp_sysid = ControlFile->system_identifier;
|
||||
NewLongPage->xlp_seg_size = XLogSegSize;
|
||||
NewLongPage->xlp_xlog_blcksz = XLOG_BLCKSZ;
|
||||
NewPage ->xlp_info |= XLP_LONG_HEADER;
|
||||
NewPage->xlp_info |= XLP_LONG_HEADER;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2008,10 +2008,10 @@ CalculateCheckpointSegments(void)
|
|||
*
|
||||
* a) we keep WAL for two checkpoint cycles, back to the "prev" checkpoint.
|
||||
* b) during checkpoint, we consume checkpoint_completion_target *
|
||||
* number of segments consumed between checkpoints.
|
||||
* number of segments consumed between checkpoints.
|
||||
*-------
|
||||
*/
|
||||
target = (double ) max_wal_size / (2.0 + CheckPointCompletionTarget);
|
||||
target = (double) max_wal_size / (2.0 + CheckPointCompletionTarget);
|
||||
|
||||
/* round down */
|
||||
CheckPointSegments = (int) target;
|
||||
|
@ -2052,15 +2052,15 @@ XLOGfileslop(XLogRecPtr PriorRedoPtr)
|
|||
* remove enough segments to stay below the maximum.
|
||||
*/
|
||||
minSegNo = PriorRedoPtr / XLOG_SEG_SIZE + min_wal_size - 1;
|
||||
maxSegNo = PriorRedoPtr / XLOG_SEG_SIZE + max_wal_size - 1;
|
||||
maxSegNo = PriorRedoPtr / XLOG_SEG_SIZE + max_wal_size - 1;
|
||||
|
||||
/*
|
||||
* Between those limits, recycle enough segments to get us through to the
|
||||
* estimated end of next checkpoint.
|
||||
*
|
||||
* To estimate where the next checkpoint will finish, assume that the
|
||||
* system runs steadily consuming CheckPointDistanceEstimate
|
||||
* bytes between every checkpoint.
|
||||
* system runs steadily consuming CheckPointDistanceEstimate bytes between
|
||||
* every checkpoint.
|
||||
*
|
||||
* The reason this calculation is done from the prior checkpoint, not the
|
||||
* one that just finished, is that this behaves better if some checkpoint
|
||||
|
@ -3005,11 +3005,11 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
|
|||
/*
|
||||
* XXX: What should we use as max_segno? We used to use XLOGfileslop when
|
||||
* that was a constant, but that was always a bit dubious: normally, at a
|
||||
* checkpoint, XLOGfileslop was the offset from the checkpoint record,
|
||||
* but here, it was the offset from the insert location. We can't do the
|
||||
* checkpoint, XLOGfileslop was the offset from the checkpoint record, but
|
||||
* here, it was the offset from the insert location. We can't do the
|
||||
* normal XLOGfileslop calculation here because we don't have access to
|
||||
* the prior checkpoint's redo location. So somewhat arbitrarily, just
|
||||
* use CheckPointSegments.
|
||||
* the prior checkpoint's redo location. So somewhat arbitrarily, just use
|
||||
* CheckPointSegments.
|
||||
*/
|
||||
max_segno = logsegno + CheckPointSegments;
|
||||
if (!InstallXLogFileSegment(&installed_segno, tmppath,
|
||||
|
@ -3098,7 +3098,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto)
|
|||
nread = upto - nbytes;
|
||||
|
||||
/*
|
||||
* The part that is not read from the source file is filled with zeros.
|
||||
* The part that is not read from the source file is filled with
|
||||
* zeros.
|
||||
*/
|
||||
if (nread < sizeof(buffer))
|
||||
memset(buffer, 0, sizeof(buffer));
|
||||
|
@ -3153,8 +3154,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto)
|
|||
|
||||
/*
|
||||
* Now move the segment into place with its final name. (Or just return
|
||||
* the path to the file we created, if the caller wants to handle the
|
||||
* rest on its own.)
|
||||
* the path to the file we created, if the caller wants to handle the rest
|
||||
* on its own.)
|
||||
*/
|
||||
if (dstfname)
|
||||
{
|
||||
|
@ -3690,8 +3691,8 @@ RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI)
|
|||
|
||||
/*
|
||||
* Remove files that are on a timeline older than the new one we're
|
||||
* switching to, but with a segment number >= the first segment on
|
||||
* the new timeline.
|
||||
* switching to, but with a segment number >= the first segment on the
|
||||
* new timeline.
|
||||
*/
|
||||
if (strncmp(xlde->d_name, switchseg, 8) < 0 &&
|
||||
strcmp(xlde->d_name + 8, switchseg + 8) > 0)
|
||||
|
@ -3768,12 +3769,13 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
|
|||
segname)));
|
||||
|
||||
#ifdef WIN32
|
||||
|
||||
/*
|
||||
* On Windows, if another process (e.g another backend) holds the file
|
||||
* open in FILE_SHARE_DELETE mode, unlink will succeed, but the file
|
||||
* will still show up in directory listing until the last handle is
|
||||
* closed. To avoid confusing the lingering deleted file for a live WAL
|
||||
* file that needs to be archived, rename it before deleting it.
|
||||
* closed. To avoid confusing the lingering deleted file for a live
|
||||
* WAL file that needs to be archived, rename it before deleting it.
|
||||
*
|
||||
* If another process holds the file open without FILE_SHARE_DELETE
|
||||
* flag, rename will fail. We'll try again at the next checkpoint.
|
||||
|
@ -3783,8 +3785,8 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
|
|||
{
|
||||
ereport(LOG,
|
||||
(errcode_for_file_access(),
|
||||
errmsg("could not rename old transaction log file \"%s\": %m",
|
||||
path)));
|
||||
errmsg("could not rename old transaction log file \"%s\": %m",
|
||||
path)));
|
||||
return;
|
||||
}
|
||||
rc = unlink(newpath);
|
||||
|
@ -3795,8 +3797,8 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
|
|||
{
|
||||
ereport(LOG,
|
||||
(errcode_for_file_access(),
|
||||
errmsg("could not remove old transaction log file \"%s\": %m",
|
||||
path)));
|
||||
errmsg("could not remove old transaction log file \"%s\": %m",
|
||||
path)));
|
||||
return;
|
||||
}
|
||||
CheckpointStats.ckpt_segs_removed++;
|
||||
|
@ -4609,11 +4611,11 @@ XLOGShmemInit(void)
|
|||
int i;
|
||||
|
||||
#ifdef WAL_DEBUG
|
||||
|
||||
/*
|
||||
* Create a memory context for WAL debugging that's exempt from the
|
||||
* normal "no pallocs in critical section" rule. Yes, that can lead to a
|
||||
* PANIC if an allocation fails, but wal_debug is not for production use
|
||||
* anyway.
|
||||
* Create a memory context for WAL debugging that's exempt from the normal
|
||||
* "no pallocs in critical section" rule. Yes, that can lead to a PANIC if
|
||||
* an allocation fails, but wal_debug is not for production use anyway.
|
||||
*/
|
||||
if (walDebugCxt == NULL)
|
||||
{
|
||||
|
@ -5044,7 +5046,7 @@ readRecoveryCommandFile(void)
|
|||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
||||
errmsg("invalid value for recovery parameter \"recovery_target\""),
|
||||
errhint("The only allowed value is \"immediate\".")));
|
||||
errhint("The only allowed value is \"immediate\".")));
|
||||
ereport(DEBUG2,
|
||||
(errmsg_internal("recovery_target = '%s'",
|
||||
item->value)));
|
||||
|
@ -5135,9 +5137,9 @@ readRecoveryCommandFile(void)
|
|||
}
|
||||
|
||||
/*
|
||||
* Override any inconsistent requests. Not that this is a change
|
||||
* of behaviour in 9.5; prior to this we simply ignored a request
|
||||
* to pause if hot_standby = off, which was surprising behaviour.
|
||||
* Override any inconsistent requests. Not that this is a change of
|
||||
* behaviour in 9.5; prior to this we simply ignored a request to pause if
|
||||
* hot_standby = off, which was surprising behaviour.
|
||||
*/
|
||||
if (recoveryTargetAction == RECOVERY_TARGET_ACTION_PAUSE &&
|
||||
recoveryTargetActionSet &&
|
||||
|
@ -6043,7 +6045,7 @@ StartupXLOG(void)
|
|||
if (read_backup_label(&checkPointLoc, &backupEndRequired,
|
||||
&backupFromStandby))
|
||||
{
|
||||
List *tablespaces = NIL;
|
||||
List *tablespaces = NIL;
|
||||
|
||||
/*
|
||||
* Archive recovery was requested, and thanks to the backup label
|
||||
|
@ -6099,7 +6101,7 @@ StartupXLOG(void)
|
|||
foreach(lc, tablespaces)
|
||||
{
|
||||
tablespaceinfo *ti = lfirst(lc);
|
||||
char *linkloc;
|
||||
char *linkloc;
|
||||
|
||||
linkloc = psprintf("pg_tblspc/%s", ti->oid);
|
||||
|
||||
|
@ -6112,26 +6114,26 @@ StartupXLOG(void)
|
|||
*/
|
||||
if (lstat(linkloc, &st) == 0 && S_ISDIR(st.st_mode))
|
||||
{
|
||||
if (!rmtree(linkloc,true))
|
||||
if (!rmtree(linkloc, true))
|
||||
ereport(ERROR,
|
||||
(errcode_for_file_access(),
|
||||
errmsg("could not remove directory \"%s\": %m",
|
||||
linkloc)));
|
||||
errmsg("could not remove directory \"%s\": %m",
|
||||
linkloc)));
|
||||
}
|
||||
else
|
||||
{
|
||||
if (unlink(linkloc) < 0 && errno != ENOENT)
|
||||
ereport(ERROR,
|
||||
(errcode_for_file_access(),
|
||||
errmsg("could not remove symbolic link \"%s\": %m",
|
||||
linkloc)));
|
||||
errmsg("could not remove symbolic link \"%s\": %m",
|
||||
linkloc)));
|
||||
}
|
||||
|
||||
if (symlink(ti->path, linkloc) < 0)
|
||||
ereport(ERROR,
|
||||
(errcode_for_file_access(),
|
||||
errmsg("could not create symbolic link \"%s\": %m",
|
||||
linkloc)));
|
||||
errmsg("could not create symbolic link \"%s\": %m",
|
||||
linkloc)));
|
||||
|
||||
pfree(ti->oid);
|
||||
pfree(ti->path);
|
||||
|
@ -6222,9 +6224,9 @@ StartupXLOG(void)
|
|||
* in place if the database had been cleanly shut down, but it seems
|
||||
* safest to just remove them always and let them be rebuilt during the
|
||||
* first backend startup. These files needs to be removed from all
|
||||
* directories including pg_tblspc, however the symlinks are created
|
||||
* only after reading tablesapce_map file in case of archive recovery
|
||||
* from backup, so needs to clear old relcache files here after creating
|
||||
* directories including pg_tblspc, however the symlinks are created only
|
||||
* after reading tablesapce_map file in case of archive recovery from
|
||||
* backup, so needs to clear old relcache files here after creating
|
||||
* symlinks.
|
||||
*/
|
||||
RelationCacheInitFileRemove();
|
||||
|
@ -6442,9 +6444,9 @@ StartupXLOG(void)
|
|||
* Also set backupEndPoint and use minRecoveryPoint as the backup end
|
||||
* location if we're starting recovery from a base backup which was
|
||||
* taken from a standby. In this case, the database system status in
|
||||
* pg_control must indicate that the database was already in
|
||||
* recovery. Usually that will be DB_IN_ARCHIVE_RECOVERY but also can
|
||||
* be DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
|
||||
* pg_control must indicate that the database was already in recovery.
|
||||
* Usually that will be DB_IN_ARCHIVE_RECOVERY but also can be
|
||||
* DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
|
||||
* before reaching this point; e.g. because restore_command or
|
||||
* primary_conninfo were faulty.
|
||||
*
|
||||
|
@ -6500,10 +6502,10 @@ StartupXLOG(void)
|
|||
|
||||
/*
|
||||
* If there was a tablespace_map file, it's done its job and the
|
||||
* symlinks have been created. We must get rid of the map file
|
||||
* so that if we crash during recovery, we don't create symlinks
|
||||
* again. It seems prudent though to just rename the file out of
|
||||
* the way rather than delete it completely.
|
||||
* symlinks have been created. We must get rid of the map file so
|
||||
* that if we crash during recovery, we don't create symlinks again.
|
||||
* It seems prudent though to just rename the file out of the way
|
||||
* rather than delete it completely.
|
||||
*/
|
||||
if (haveTblspcMap)
|
||||
{
|
||||
|
@ -6859,7 +6861,8 @@ StartupXLOG(void)
|
|||
{
|
||||
/*
|
||||
* Before we continue on the new timeline, clean up any
|
||||
* (possibly bogus) future WAL segments on the old timeline.
|
||||
* (possibly bogus) future WAL segments on the old
|
||||
* timeline.
|
||||
*/
|
||||
RemoveNonParentXlogFiles(EndRecPtr, ThisTimeLineID);
|
||||
|
||||
|
@ -6890,32 +6893,33 @@ StartupXLOG(void)
|
|||
{
|
||||
if (!reachedConsistency)
|
||||
ereport(FATAL,
|
||||
(errmsg("requested recovery stop point is before consistent recovery point")));
|
||||
(errmsg("requested recovery stop point is before consistent recovery point")));
|
||||
|
||||
/*
|
||||
* This is the last point where we can restart recovery with a
|
||||
* new recovery target, if we shutdown and begin again. After
|
||||
* this, Resource Managers may choose to do permanent corrective
|
||||
* actions at end of recovery.
|
||||
* this, Resource Managers may choose to do permanent
|
||||
* corrective actions at end of recovery.
|
||||
*/
|
||||
switch (recoveryTargetAction)
|
||||
{
|
||||
case RECOVERY_TARGET_ACTION_SHUTDOWN:
|
||||
/*
|
||||
* exit with special return code to request shutdown
|
||||
* of postmaster. Log messages issued from
|
||||
* postmaster.
|
||||
*/
|
||||
proc_exit(3);
|
||||
|
||||
/*
|
||||
* exit with special return code to request shutdown
|
||||
* of postmaster. Log messages issued from
|
||||
* postmaster.
|
||||
*/
|
||||
proc_exit(3);
|
||||
|
||||
case RECOVERY_TARGET_ACTION_PAUSE:
|
||||
SetRecoveryPause(true);
|
||||
recoveryPausesHere();
|
||||
SetRecoveryPause(true);
|
||||
recoveryPausesHere();
|
||||
|
||||
/* drop into promote */
|
||||
/* drop into promote */
|
||||
|
||||
case RECOVERY_TARGET_ACTION_PROMOTE:
|
||||
break;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -7259,8 +7263,8 @@ StartupXLOG(void)
|
|||
* too.
|
||||
*
|
||||
* If a .done or .ready file already exists for the old timeline,
|
||||
* however, we had already determined that the segment is complete,
|
||||
* so we can let it be archived normally. (In particular, if it was
|
||||
* however, we had already determined that the segment is complete, so
|
||||
* we can let it be archived normally. (In particular, if it was
|
||||
* restored from the archive to begin with, it's expected to have a
|
||||
* .done file).
|
||||
*/
|
||||
|
@ -7291,8 +7295,8 @@ StartupXLOG(void)
|
|||
if (rename(origpath, partialpath) != 0)
|
||||
ereport(ERROR,
|
||||
(errcode_for_file_access(),
|
||||
errmsg("could not rename file \"%s\" to \"%s\": %m",
|
||||
origpath, partialpath)));
|
||||
errmsg("could not rename file \"%s\" to \"%s\": %m",
|
||||
origpath, partialpath)));
|
||||
XLogArchiveNotify(partialfname);
|
||||
}
|
||||
}
|
||||
|
@ -7366,8 +7370,8 @@ StartupXLOG(void)
|
|||
XLogReportParameters();
|
||||
|
||||
/*
|
||||
* Local WAL inserts enabled, so it's time to finish initialization
|
||||
* of commit timestamp.
|
||||
* Local WAL inserts enabled, so it's time to finish initialization of
|
||||
* commit timestamp.
|
||||
*/
|
||||
CompleteCommitTsInitialization();
|
||||
|
||||
|
@ -7961,7 +7965,7 @@ LogCheckpointStart(int flags, bool restartpoint)
|
|||
(flags & CHECKPOINT_WAIT) ? " wait" : "",
|
||||
(flags & CHECKPOINT_CAUSE_XLOG) ? " xlog" : "",
|
||||
(flags & CHECKPOINT_CAUSE_TIME) ? " time" : "",
|
||||
(flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" :"");
|
||||
(flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : "");
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -8056,8 +8060,8 @@ static void
|
|||
UpdateCheckPointDistanceEstimate(uint64 nbytes)
|
||||
{
|
||||
/*
|
||||
* To estimate the number of segments consumed between checkpoints, keep
|
||||
* a moving average of the amount of WAL generated in previous checkpoint
|
||||
* To estimate the number of segments consumed between checkpoints, keep a
|
||||
* moving average of the amount of WAL generated in previous checkpoint
|
||||
* cycles. However, if the load is bursty, with quiet periods and busy
|
||||
* periods, we want to cater for the peak load. So instead of a plain
|
||||
* moving average, let the average decline slowly if the previous cycle
|
||||
|
@ -9473,8 +9477,8 @@ xlog_redo(XLogReaderState *record)
|
|||
}
|
||||
|
||||
/*
|
||||
* Update the commit timestamp tracking. If there was a change
|
||||
* it needs to be activated or deactivated accordingly.
|
||||
* Update the commit timestamp tracking. If there was a change it
|
||||
* needs to be activated or deactivated accordingly.
|
||||
*/
|
||||
if (track_commit_timestamp != xlrec.track_commit_timestamp)
|
||||
{
|
||||
|
@ -9483,6 +9487,7 @@ xlog_redo(XLogReaderState *record)
|
|||
if (track_commit_timestamp)
|
||||
ActivateCommitTs();
|
||||
else
|
||||
|
||||
/*
|
||||
* We can't create a new WAL record here, but that's OK as
|
||||
* master did the WAL logging already and we will replay the
|
||||
|
@ -9996,7 +10001,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
|
|||
char *relpath = NULL;
|
||||
int rllen;
|
||||
StringInfoData buflinkpath;
|
||||
char *s = linkpath;
|
||||
char *s = linkpath;
|
||||
|
||||
/* Skip special stuff */
|
||||
if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
|
||||
|
@ -10023,10 +10028,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
|
|||
linkpath[rllen] = '\0';
|
||||
|
||||
/*
|
||||
* Add the escape character '\\' before newline in a string
|
||||
* to ensure that we can distinguish between the newline in
|
||||
* the tablespace path and end of line while reading
|
||||
* tablespace_map file during archive recovery.
|
||||
* Add the escape character '\\' before newline in a string to
|
||||
* ensure that we can distinguish between the newline in the
|
||||
* tablespace path and end of line while reading tablespace_map
|
||||
* file during archive recovery.
|
||||
*/
|
||||
initStringInfo(&buflinkpath);
|
||||
|
||||
|
@ -10054,8 +10059,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
|
|||
ti->rpath = relpath ? pstrdup(relpath) : NULL;
|
||||
ti->size = infotbssize ? sendTablespace(fullpath, true) : -1;
|
||||
|
||||
if(tablespaces)
|
||||
*tablespaces = lappend(*tablespaces, ti);
|
||||
if (tablespaces)
|
||||
*tablespaces = lappend(*tablespaces, ti);
|
||||
|
||||
appendStringInfo(&tblspc_mapfbuf, "%s %s\n", ti->oid, ti->path);
|
||||
|
||||
|
@ -10150,10 +10155,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
|
|||
}
|
||||
else
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
|
||||
errmsg("a backup is already in progress"),
|
||||
errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
|
||||
TABLESPACE_MAP)));
|
||||
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
|
||||
errmsg("a backup is already in progress"),
|
||||
errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
|
||||
TABLESPACE_MAP)));
|
||||
|
||||
fp = AllocateFile(TABLESPACE_MAP, "w");
|
||||
|
||||
|
@ -10353,8 +10358,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
|
|||
BACKUP_LABEL_FILE)));
|
||||
|
||||
/*
|
||||
* Remove tablespace_map file if present, it is created
|
||||
* only if there are tablespaces.
|
||||
* Remove tablespace_map file if present, it is created only if there
|
||||
* are tablespaces.
|
||||
*/
|
||||
unlink(TABLESPACE_MAP);
|
||||
}
|
||||
|
@ -10773,10 +10778,12 @@ read_tablespace_map(List **tablespaces)
|
|||
tablespaceinfo *ti;
|
||||
FILE *lfp;
|
||||
char tbsoid[MAXPGPATH];
|
||||
char *tbslinkpath;
|
||||
char *tbslinkpath;
|
||||
char str[MAXPGPATH];
|
||||
int ch, prev_ch = -1,
|
||||
i = 0, n;
|
||||
int ch,
|
||||
prev_ch = -1,
|
||||
i = 0,
|
||||
n;
|
||||
|
||||
/*
|
||||
* See if tablespace_map file is present
|
||||
|
@ -10794,9 +10801,9 @@ read_tablespace_map(List **tablespaces)
|
|||
|
||||
/*
|
||||
* Read and parse the link name and path lines from tablespace_map file
|
||||
* (this code is pretty crude, but we are not expecting any variability
|
||||
* in the file format). While taking backup we embed escape character
|
||||
* '\\' before newline in tablespace path, so that during reading of
|
||||
* (this code is pretty crude, but we are not expecting any variability in
|
||||
* the file format). While taking backup we embed escape character '\\'
|
||||
* before newline in tablespace path, so that during reading of
|
||||
* tablespace_map file, we could distinguish newline in tablespace path
|
||||
* and end of line. Now while reading tablespace_map file, remove the
|
||||
* escape character that has been added in tablespace path during backup.
|
||||
|
@ -10808,8 +10815,8 @@ read_tablespace_map(List **tablespaces)
|
|||
str[i] = '\0';
|
||||
if (sscanf(str, "%s %n", tbsoid, &n) != 1)
|
||||
ereport(FATAL,
|
||||
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
|
||||
errmsg("invalid data in file \"%s\"", TABLESPACE_MAP)));
|
||||
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
|
||||
errmsg("invalid data in file \"%s\"", TABLESPACE_MAP)));
|
||||
tbslinkpath = str + n;
|
||||
i = 0;
|
||||
|
||||
|
@ -10821,7 +10828,7 @@ read_tablespace_map(List **tablespaces)
|
|||
continue;
|
||||
}
|
||||
else if ((ch == '\n' || ch == '\r') && prev_ch == '\\')
|
||||
str[i-1] = ch;
|
||||
str[i - 1] = ch;
|
||||
else
|
||||
str[i++] = ch;
|
||||
prev_ch = ch;
|
||||
|
@ -10868,7 +10875,7 @@ BackupInProgress(void)
|
|||
|
||||
/*
|
||||
* CancelBackup: rename the "backup_label" and "tablespace_map"
|
||||
* files to cancel backup mode
|
||||
* files to cancel backup mode
|
||||
*
|
||||
* If the "backup_label" file exists, it will be renamed to "backup_label.old".
|
||||
* Similarly, if the "tablespace_map" file exists, it will be renamed to
|
||||
|
@ -11115,8 +11122,8 @@ static bool
|
|||
WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
|
||||
bool fetching_ckpt, XLogRecPtr tliRecPtr)
|
||||
{
|
||||
static TimestampTz last_fail_time = 0;
|
||||
TimestampTz now;
|
||||
static TimestampTz last_fail_time = 0;
|
||||
TimestampTz now;
|
||||
|
||||
/*-------
|
||||
* Standby mode is implemented by a state machine:
|
||||
|
@ -11270,9 +11277,10 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
|
|||
*/
|
||||
now = GetCurrentTimestamp();
|
||||
if (!TimestampDifferenceExceeds(last_fail_time, now,
|
||||
wal_retrieve_retry_interval))
|
||||
wal_retrieve_retry_interval))
|
||||
{
|
||||
long secs, wait_time;
|
||||
long secs,
|
||||
wait_time;
|
||||
int usecs;
|
||||
|
||||
TimestampDifference(last_fail_time, now, &secs, &usecs);
|
||||
|
@ -11280,7 +11288,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
|
|||
(secs * 1000 + usecs / 1000);
|
||||
|
||||
WaitLatch(&XLogCtl->recoveryWakeupLatch,
|
||||
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
|
||||
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
|
||||
wait_time);
|
||||
ResetLatch(&XLogCtl->recoveryWakeupLatch);
|
||||
now = GetCurrentTimestamp();
|
||||
|
@ -11605,8 +11613,8 @@ fsync_pgdata(char *datadir)
|
|||
return;
|
||||
|
||||
/*
|
||||
* If possible, hint to the kernel that we're soon going to fsync
|
||||
* the data directory and its contents.
|
||||
* If possible, hint to the kernel that we're soon going to fsync the data
|
||||
* directory and its contents.
|
||||
*/
|
||||
#if defined(HAVE_SYNC_FILE_RANGE) || \
|
||||
(defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED))
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||
#include "pg_trace.h"
|
||||
|
||||
/* Buffer size required to store a compressed version of backup block image */
|
||||
#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
|
||||
#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
|
||||
|
||||
/*
|
||||
* For each block reference registered with XLogRegisterBuffer, we fill in
|
||||
|
@ -58,7 +58,7 @@ typedef struct
|
|||
|
||||
/* buffer to store a compressed version of backup block image */
|
||||
char compressed_page[PGLZ_MAX_BLCKSZ];
|
||||
} registered_buffer;
|
||||
} registered_buffer;
|
||||
|
||||
static registered_buffer *registered_buffers;
|
||||
static int max_registered_buffers; /* allocated size */
|
||||
|
@ -110,7 +110,7 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info,
|
|||
XLogRecPtr RedoRecPtr, bool doPageWrites,
|
||||
XLogRecPtr *fpw_lsn);
|
||||
static bool XLogCompressBackupBlock(char *page, uint16 hole_offset,
|
||||
uint16 hole_length, char *dest, uint16 *dlen);
|
||||
uint16 hole_length, char *dest, uint16 *dlen);
|
||||
|
||||
/*
|
||||
* Begin constructing a WAL record. This must be called before the
|
||||
|
@ -602,7 +602,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
|
|||
&compressed_len);
|
||||
}
|
||||
|
||||
/* Fill in the remaining fields in the XLogRecordBlockHeader struct */
|
||||
/*
|
||||
* Fill in the remaining fields in the XLogRecordBlockHeader
|
||||
* struct
|
||||
*/
|
||||
bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
|
||||
|
||||
/*
|
||||
|
@ -762,7 +765,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
|
|||
* the length of compressed block image.
|
||||
*/
|
||||
static bool
|
||||
XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
|
||||
XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length,
|
||||
char *dest, uint16 *dlen)
|
||||
{
|
||||
int32 orig_len = BLCKSZ - hole_length;
|
||||
|
@ -790,16 +793,15 @@ XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
|
|||
source = page;
|
||||
|
||||
/*
|
||||
* We recheck the actual size even if pglz_compress() reports success
|
||||
* and see if the number of bytes saved by compression is larger than
|
||||
* the length of extra data needed for the compressed version of block
|
||||
* image.
|
||||
* We recheck the actual size even if pglz_compress() reports success and
|
||||
* see if the number of bytes saved by compression is larger than the
|
||||
* length of extra data needed for the compressed version of block image.
|
||||
*/
|
||||
len = pglz_compress(source, orig_len, dest, PGLZ_strategy_default);
|
||||
if (len >= 0 &&
|
||||
len + extra_bytes < orig_len)
|
||||
{
|
||||
*dlen = (uint16) len; /* successful compression */
|
||||
*dlen = (uint16) len; /* successful compression */
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
|
|
@ -1086,50 +1086,53 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg)
|
|||
blk->bimg_len == BLCKSZ))
|
||||
{
|
||||
report_invalid_record(state,
|
||||
"BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X",
|
||||
"BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X",
|
||||
(unsigned int) blk->hole_offset,
|
||||
(unsigned int) blk->hole_length,
|
||||
(unsigned int) blk->bimg_len,
|
||||
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
|
||||
goto err;
|
||||
}
|
||||
|
||||
/*
|
||||
* cross-check that hole_offset == 0 and hole_length == 0
|
||||
* if the HAS_HOLE flag is not set.
|
||||
* cross-check that hole_offset == 0 and hole_length == 0 if
|
||||
* the HAS_HOLE flag is not set.
|
||||
*/
|
||||
if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) &&
|
||||
(blk->hole_offset != 0 || blk->hole_length != 0))
|
||||
{
|
||||
report_invalid_record(state,
|
||||
"BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X",
|
||||
"BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X",
|
||||
(unsigned int) blk->hole_offset,
|
||||
(unsigned int) blk->hole_length,
|
||||
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
|
||||
goto err;
|
||||
}
|
||||
|
||||
/*
|
||||
* cross-check that bimg_len < BLCKSZ
|
||||
* if the IS_COMPRESSED flag is set.
|
||||
* cross-check that bimg_len < BLCKSZ if the IS_COMPRESSED
|
||||
* flag is set.
|
||||
*/
|
||||
if ((blk->bimg_info & BKPIMAGE_IS_COMPRESSED) &&
|
||||
blk->bimg_len == BLCKSZ)
|
||||
{
|
||||
report_invalid_record(state,
|
||||
"BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X",
|
||||
"BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X",
|
||||
(unsigned int) blk->bimg_len,
|
||||
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
|
||||
goto err;
|
||||
}
|
||||
|
||||
/*
|
||||
* cross-check that bimg_len = BLCKSZ if neither
|
||||
* HAS_HOLE nor IS_COMPRESSED flag is set.
|
||||
* cross-check that bimg_len = BLCKSZ if neither HAS_HOLE nor
|
||||
* IS_COMPRESSED flag is set.
|
||||
*/
|
||||
if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) &&
|
||||
!(blk->bimg_info & BKPIMAGE_IS_COMPRESSED) &&
|
||||
blk->bimg_len != BLCKSZ)
|
||||
{
|
||||
report_invalid_record(state,
|
||||
"neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X",
|
||||
"neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X",
|
||||
(unsigned int) blk->data_len,
|
||||
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
|
||||
goto err;
|
||||
|
@ -1294,8 +1297,8 @@ bool
|
|||
RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
|
||||
{
|
||||
DecodedBkpBlock *bkpb;
|
||||
char *ptr;
|
||||
char tmp[BLCKSZ];
|
||||
char *ptr;
|
||||
char tmp[BLCKSZ];
|
||||
|
||||
if (!record->blocks[block_id].in_use)
|
||||
return false;
|
||||
|
|
|
@ -401,6 +401,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
|
|||
proc_exit(1); /* should never return */
|
||||
|
||||
case BootstrapProcess:
|
||||
|
||||
/*
|
||||
* There was a brief instant during which mode was Normal; this is
|
||||
* okay. We need to be in bootstrap mode during BootStrapXLOG for
|
||||
|
|
|
@ -189,7 +189,8 @@ sub Catalogs
|
|||
}
|
||||
else
|
||||
{
|
||||
die "unknown column option $attopt on column $attname"
|
||||
die
|
||||
"unknown column option $attopt on column $attname";
|
||||
}
|
||||
}
|
||||
push @{ $catalog{columns} }, \%row;
|
||||
|
|
|
@ -397,14 +397,14 @@ ExecuteGrantStmt(GrantStmt *stmt)
|
|||
istmt.behavior = stmt->behavior;
|
||||
|
||||
/*
|
||||
* Convert the RoleSpec list into an Oid list. Note that at this point
|
||||
* we insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
|
||||
* Convert the RoleSpec list into an Oid list. Note that at this point we
|
||||
* insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
|
||||
* there shouldn't be any additional work needed to support this case.
|
||||
*/
|
||||
foreach(cell, stmt->grantees)
|
||||
{
|
||||
RoleSpec *grantee = (RoleSpec *) lfirst(cell);
|
||||
Oid grantee_uid;
|
||||
RoleSpec *grantee = (RoleSpec *) lfirst(cell);
|
||||
Oid grantee_uid;
|
||||
|
||||
switch (grantee->roletype)
|
||||
{
|
||||
|
@ -892,14 +892,14 @@ ExecAlterDefaultPrivilegesStmt(AlterDefaultPrivilegesStmt *stmt)
|
|||
iacls.behavior = action->behavior;
|
||||
|
||||
/*
|
||||
* Convert the RoleSpec list into an Oid list. Note that at this point
|
||||
* we insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
|
||||
* Convert the RoleSpec list into an Oid list. Note that at this point we
|
||||
* insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
|
||||
* there shouldn't be any additional work needed to support this case.
|
||||
*/
|
||||
foreach(cell, action->grantees)
|
||||
{
|
||||
RoleSpec *grantee = (RoleSpec *) lfirst(cell);
|
||||
Oid grantee_uid;
|
||||
RoleSpec *grantee = (RoleSpec *) lfirst(cell);
|
||||
Oid grantee_uid;
|
||||
|
||||
switch (grantee->roletype)
|
||||
{
|
||||
|
|
|
@ -213,8 +213,8 @@ deleteObjectsInList(ObjectAddresses *targetObjects, Relation *depRel,
|
|||
{
|
||||
const ObjectAddress *thisobj = &targetObjects->refs[i];
|
||||
const ObjectAddressExtra *extra = &targetObjects->extras[i];
|
||||
bool original = false;
|
||||
bool normal = false;
|
||||
bool original = false;
|
||||
bool normal = false;
|
||||
|
||||
if (extra->flags & DEPFLAG_ORIGINAL)
|
||||
original = true;
|
||||
|
@ -1611,10 +1611,10 @@ find_expr_references_walker(Node *node,
|
|||
context->addrs);
|
||||
break;
|
||||
|
||||
/*
|
||||
* Dependencies for regrole should be shared among all
|
||||
* databases, so explicitly inhibit to have dependencies.
|
||||
*/
|
||||
/*
|
||||
* Dependencies for regrole should be shared among all
|
||||
* databases, so explicitly inhibit to have dependencies.
|
||||
*/
|
||||
case REGROLEOID:
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
|
|
|
@ -147,7 +147,7 @@ foreach my $catname (@{ $catalogs->{names} })
|
|||
}
|
||||
print BKI "\n )\n";
|
||||
|
||||
# open it, unless bootstrap case (create bootstrap does this automatically)
|
||||
# open it, unless bootstrap case (create bootstrap does this automatically)
|
||||
if ($catalog->{bootstrap} eq '')
|
||||
{
|
||||
print BKI "open $catname\n";
|
||||
|
@ -242,12 +242,12 @@ foreach my $catname (@{ $catalogs->{names} })
|
|||
{
|
||||
$attnum = 0;
|
||||
my @SYS_ATTRS = (
|
||||
{ name => 'ctid', type => 'tid' },
|
||||
{ name => 'oid', type => 'oid' },
|
||||
{ name => 'xmin', type => 'xid' },
|
||||
{ name => 'cmin', type=> 'cid' },
|
||||
{ name => 'xmax', type=> 'xid' },
|
||||
{ name => 'cmax', type => 'cid' },
|
||||
{ name => 'ctid', type => 'tid' },
|
||||
{ name => 'oid', type => 'oid' },
|
||||
{ name => 'xmin', type => 'xid' },
|
||||
{ name => 'cmin', type => 'cid' },
|
||||
{ name => 'xmax', type => 'xid' },
|
||||
{ name => 'cmax', type => 'cid' },
|
||||
{ name => 'tableoid', type => 'oid' });
|
||||
foreach my $attr (@SYS_ATTRS)
|
||||
{
|
||||
|
@ -384,6 +384,7 @@ sub emit_pgattr_row
|
|||
}
|
||||
elsif ($priornotnull)
|
||||
{
|
||||
|
||||
# attnotnull will automatically be set if the type is
|
||||
# fixed-width and prior columns are all NOT NULL ---
|
||||
# compare DefineAttr in bootstrap.c. oidvector and
|
||||
|
|
|
@ -1709,8 +1709,8 @@ BuildSpeculativeIndexInfo(Relation index, IndexInfo *ii)
|
|||
ii->ii_UniqueStrats = (uint16 *) palloc(sizeof(uint16) * ncols);
|
||||
|
||||
/*
|
||||
* We have to look up the operator's strategy number. This
|
||||
* provides a cross-check that the operator does match the index.
|
||||
* We have to look up the operator's strategy number. This provides a
|
||||
* cross-check that the operator does match the index.
|
||||
*/
|
||||
/* We need the func OIDs and strategy numbers too */
|
||||
for (i = 0; i < ncols; i++)
|
||||
|
@ -3186,7 +3186,7 @@ IndexGetRelation(Oid indexId, bool missing_ok)
|
|||
*/
|
||||
void
|
||||
reindex_index(Oid indexId, bool skip_constraint_checks, char persistence,
|
||||
int options)
|
||||
int options)
|
||||
{
|
||||
Relation iRel,
|
||||
heapRelation;
|
||||
|
|
|
@ -453,89 +453,188 @@ static const struct object_type_map
|
|||
const char *tm_name;
|
||||
ObjectType tm_type;
|
||||
}
|
||||
ObjectTypeMap[] =
|
||||
|
||||
ObjectTypeMap[] =
|
||||
{
|
||||
/* OCLASS_CLASS, all kinds of relations */
|
||||
{ "table", OBJECT_TABLE },
|
||||
{ "index", OBJECT_INDEX },
|
||||
{ "sequence", OBJECT_SEQUENCE },
|
||||
{ "toast table", -1 }, /* unmapped */
|
||||
{ "view", OBJECT_VIEW },
|
||||
{ "materialized view", OBJECT_MATVIEW },
|
||||
{ "composite type", -1 }, /* unmapped */
|
||||
{ "foreign table", OBJECT_FOREIGN_TABLE },
|
||||
{ "table column", OBJECT_COLUMN },
|
||||
{ "index column", -1 }, /* unmapped */
|
||||
{ "sequence column", -1 }, /* unmapped */
|
||||
{ "toast table column", -1 }, /* unmapped */
|
||||
{ "view column", -1 }, /* unmapped */
|
||||
{ "materialized view column", -1 }, /* unmapped */
|
||||
{ "composite type column", -1 }, /* unmapped */
|
||||
{ "foreign table column", OBJECT_COLUMN },
|
||||
{
|
||||
"table", OBJECT_TABLE
|
||||
},
|
||||
{
|
||||
"index", OBJECT_INDEX
|
||||
},
|
||||
{
|
||||
"sequence", OBJECT_SEQUENCE
|
||||
},
|
||||
{
|
||||
"toast table", -1
|
||||
}, /* unmapped */
|
||||
{
|
||||
"view", OBJECT_VIEW
|
||||
},
|
||||
{
|
||||
"materialized view", OBJECT_MATVIEW
|
||||
},
|
||||
{
|
||||
"composite type", -1
|
||||
}, /* unmapped */
|
||||
{
|
||||
"foreign table", OBJECT_FOREIGN_TABLE
|
||||
},
|
||||
{
|
||||
"table column", OBJECT_COLUMN
|
||||
},
|
||||
{
|
||||
"index column", -1
|
||||
}, /* unmapped */
|
||||
{
|
||||
"sequence column", -1
|
||||
}, /* unmapped */
|
||||
{
|
||||
"toast table column", -1
|
||||
}, /* unmapped */
|
||||
{
|
||||
"view column", -1
|
||||
}, /* unmapped */
|
||||
{
|
||||
"materialized view column", -1
|
||||
}, /* unmapped */
|
||||
{
|
||||
"composite type column", -1
|
||||
}, /* unmapped */
|
||||
{
|
||||
"foreign table column", OBJECT_COLUMN
|
||||
},
|
||||
/* OCLASS_PROC */
|
||||
{ "aggregate", OBJECT_AGGREGATE },
|
||||
{ "function", OBJECT_FUNCTION },
|
||||
{
|
||||
"aggregate", OBJECT_AGGREGATE
|
||||
},
|
||||
{
|
||||
"function", OBJECT_FUNCTION
|
||||
},
|
||||
	/* OCLASS_TYPE */
	{ "type", OBJECT_TYPE },
	/* OCLASS_CAST */
	{ "cast", OBJECT_CAST },
	/* OCLASS_COLLATION */
	{ "collation", OBJECT_COLLATION },
	/* OCLASS_CONSTRAINT */
	{ "table constraint", OBJECT_TABCONSTRAINT },
	{ "domain constraint", OBJECT_DOMCONSTRAINT },
	/* OCLASS_CONVERSION */
	{ "conversion", OBJECT_CONVERSION },
	/* OCLASS_DEFAULT */
	{ "default value", OBJECT_DEFAULT },
	/* OCLASS_LANGUAGE */
	{ "language", OBJECT_LANGUAGE },
	/* OCLASS_LARGEOBJECT */
	{ "large object", OBJECT_LARGEOBJECT },
	/* OCLASS_OPERATOR */
	{ "operator", OBJECT_OPERATOR },
	/* OCLASS_OPCLASS */
	{ "operator class", OBJECT_OPCLASS },
	/* OCLASS_OPFAMILY */
	{ "operator family", OBJECT_OPFAMILY },
	/* OCLASS_AMOP */
	{ "operator of access method", OBJECT_AMOP },
	/* OCLASS_AMPROC */
	{ "function of access method", OBJECT_AMPROC },
	/* OCLASS_REWRITE */
	{ "rule", OBJECT_RULE },
	/* OCLASS_TRIGGER */
	{ "trigger", OBJECT_TRIGGER },
	/* OCLASS_SCHEMA */
	{ "schema", OBJECT_SCHEMA },
	/* OCLASS_TSPARSER */
	{ "text search parser", OBJECT_TSPARSER },
	/* OCLASS_TSDICT */
	{ "text search dictionary", OBJECT_TSDICTIONARY },
	/* OCLASS_TSTEMPLATE */
	{ "text search template", OBJECT_TSTEMPLATE },
	/* OCLASS_TSCONFIG */
	{ "text search configuration", OBJECT_TSCONFIGURATION },
	/* OCLASS_ROLE */
	{ "role", OBJECT_ROLE },
	/* OCLASS_DATABASE */
	{ "database", OBJECT_DATABASE },
	/* OCLASS_TBLSPACE */
	{ "tablespace", OBJECT_TABLESPACE },
	/* OCLASS_FDW */
	{ "foreign-data wrapper", OBJECT_FDW },
	/* OCLASS_FOREIGN_SERVER */
	{ "server", OBJECT_FOREIGN_SERVER },
	/* OCLASS_USER_MAPPING */
	{ "user mapping", OBJECT_USER_MAPPING },
	/* OCLASS_DEFACL */
	{ "default acl", OBJECT_DEFACL },
	/* OCLASS_EXTENSION */
	{ "extension", OBJECT_EXTENSION },
	/* OCLASS_EVENT_TRIGGER */
	{ "event trigger", OBJECT_EVENT_TRIGGER },
	/* OCLASS_POLICY */
	{ "policy", OBJECT_POLICY }
};

const ObjectAddress InvalidObjectAddress =
@@ -667,16 +766,16 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
break;
case OBJECT_DOMCONSTRAINT:
{
ObjectAddress domaddr;
char *constrname;
domaddr = get_object_address_type(OBJECT_DOMAIN,
list_head(objname), missing_ok);
constrname = strVal(linitial(objargs));
address.classId = ConstraintRelationId;
address.objectId = get_domain_constraint_oid(domaddr.objectId,
constrname, missing_ok);
address.objectSubId = 0;
}

@@ -1286,8 +1385,8 @@ get_object_address_attrdef(ObjectType objtype, List *objname,
if (attnum != InvalidAttrNumber && tupdesc->constr != NULL)
{
Relation attrdef;
ScanKeyData keys[2];
SysScanDesc scan;
HeapTuple tup;
attrdef = relation_open(AttrDefaultRelationId, AccessShareLock);

@@ -1419,14 +1518,14 @@ static ObjectAddress
get_object_address_opf_member(ObjectType objtype,
List *objname, List *objargs, bool missing_ok)
{
ObjectAddress famaddr;
ObjectAddress address;
ListCell *cell;
List *copy;
char *typenames[2];
Oid typeoids[2];
int membernum;
int i;
/*
* The last element of the objname list contains the strategy or procedure

@@ -1441,9 +1540,9 @@ get_object_address_opf_member(ObjectType objtype,
/* find out left/right type names and OIDs */
i = 0;
foreach(cell, objargs)
{
ObjectAddress typaddr;
typenames[i] = strVal(lfirst(cell));
typaddr = get_object_address_type(OBJECT_TYPE, cell, missing_ok);

@@ -1471,9 +1570,9 @@ get_object_address_opf_member(ObjectType objtype,
if (!missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator %d (%s, %s) of %s does not exist",
membernum, typenames[0], typenames[1],
getObjectDescription(&famaddr))));
}
else
{

@@ -1500,9 +1599,9 @@ get_object_address_opf_member(ObjectType objtype,
if (!missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("function %d (%s, %s) of %s does not exist",
membernum, typenames[0], typenames[1],
getObjectDescription(&famaddr))));
}
else
{

@@ -1636,8 +1735,8 @@ get_object_address_defacl(List *objname, List *objargs, bool missing_ok)
default:
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("unrecognized default ACL object type %c", objtype),
errhint("Valid object types are 'r', 'S', 'f', and 'T'.")));
}
/*

@@ -1688,8 +1787,8 @@ not_found:
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("default ACL for user \"%s\" on %s does not exist",
username, objtype_str)));
}
return address;
}

@@ -1701,11 +1800,11 @@ not_found:
static List *
textarray_to_strvaluelist(ArrayType *arr)
{
Datum *elems;
bool *nulls;
int nelems;
List *list = NIL;
int i;
deconstruct_array(arr, TEXTOID, -1, false, 'i',
&elems, &nulls, &nelems);

@@ -1728,18 +1827,18 @@ textarray_to_strvaluelist(ArrayType *arr)
Datum
pg_get_object_address(PG_FUNCTION_ARGS)
{
char *ttype = TextDatumGetCString(PG_GETARG_TEXT_P(0));
ArrayType *namearr = PG_GETARG_ARRAYTYPE_P(1);
ArrayType *argsarr = PG_GETARG_ARRAYTYPE_P(2);
int itype;
ObjectType type;
List *name;
List *args;
ObjectAddress addr;
TupleDesc tupdesc;
Datum values[3];
bool nulls[3];
HeapTuple htup;
Relation relation;
/* Decode object type, raise error if unknown */

@@ -1751,16 +1850,16 @@ pg_get_object_address(PG_FUNCTION_ARGS)
type = (ObjectType) itype;
/*
* Convert the text array to the representation appropriate for the given
* object type. Most use a simple string Values list, but there are some
* exceptions.
*/
if (type == OBJECT_TYPE || type == OBJECT_DOMAIN || type == OBJECT_CAST ||
type == OBJECT_DOMCONSTRAINT)
{
Datum *elems;
bool *nulls;
int nelems;
deconstruct_array(namearr, TEXTOID, -1, false, 'i',
&elems, &nulls, &nelems);

@@ -1812,10 +1911,10 @@ pg_get_object_address(PG_FUNCTION_ARGS)
type == OBJECT_AMPROC)
{
/* in these cases, the args list must be of TypeName */
Datum *elems;
bool *nulls;
int nelems;
int i;
deconstruct_array(argsarr, TEXTOID, -1, false, 'i',
&elems, &nulls, &nelems);

@@ -1826,9 +1925,9 @@ pg_get_object_address(PG_FUNCTION_ARGS)
if (nulls[i])
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("name or argument lists may not contain nulls")));
args = lappend(args,
typeStringToTypeName(TextDatumGetCString(elems[i])));
}
}
else

@@ -1850,7 +1949,7 @@ pg_get_object_address(PG_FUNCTION_ARGS)
if (list_length(args) != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("argument list length must be exactly %d", 1)));
break;
case OBJECT_OPFAMILY:
case OBJECT_OPCLASS:

@@ -1870,7 +1969,7 @@ pg_get_object_address(PG_FUNCTION_ARGS)
if (list_length(args) != 2)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("argument list length must be exactly %d", 2)));
break;
default:
break;

@@ -2146,8 +2245,8 @@ read_objtype_from_string(const char *objtype)
}
if (i >= lengthof(ObjectTypeMap))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("unrecognized object type \"%s\"", objtype)));
return type;
}

@@ -2693,7 +2792,7 @@ getObjectDescription(const ObjectAddress *object)
Form_pg_transform trfForm;
trfTup = SearchSysCache1(TRFOID,
ObjectIdGetDatum(object->objectId));
if (!HeapTupleIsValid(trfTup))
elog(ERROR, "could not find tuple for transform %u",
object->objectId);

@@ -2924,28 +3023,28 @@ getObjectDescription(const ObjectAddress *object)
case DEFACLOBJ_RELATION:
appendStringInfo(&buffer,
_("default privileges on new relations belonging to role %s"),
GetUserNameFromId(defacl->defaclrole, false));
break;
case DEFACLOBJ_SEQUENCE:
appendStringInfo(&buffer,
_("default privileges on new sequences belonging to role %s"),
GetUserNameFromId(defacl->defaclrole, false));
break;
case DEFACLOBJ_FUNCTION:
appendStringInfo(&buffer,
_("default privileges on new functions belonging to role %s"),
GetUserNameFromId(defacl->defaclrole, false));
break;
case DEFACLOBJ_TYPE:
appendStringInfo(&buffer,
_("default privileges on new types belonging to role %s"),
GetUserNameFromId(defacl->defaclrole, false));
break;
default:
/* shouldn't get here */
appendStringInfo(&buffer,
_("default privileges belonging to role %s"),
GetUserNameFromId(defacl->defaclrole, false));
break;
}

@@ -2991,8 +3090,8 @@ getObjectDescription(const ObjectAddress *object)
case OCLASS_POLICY:
{
Relation policy_rel;
ScanKeyData skey[1];
SysScanDesc sscan;
HeapTuple tuple;
Form_pg_policy form_policy;

@@ -3677,7 +3776,7 @@ getObjectIdentityParts(const ObjectAddress *object,
case OCLASS_TYPE:
{
char *typeout;
typeout = format_type_be_qualified(object->objectId);
appendStringInfoString(&buffer, typeout);

@@ -3770,7 +3869,7 @@ getObjectIdentityParts(const ObjectAddress *object,
appendStringInfo(&buffer, "%s on %s",
quote_identifier(NameStr(con->conname)),
getObjectIdentityParts(&domain, objname, objargs));
if (objname)
*objargs = lappend(*objargs, pstrdup(NameStr(con->conname)));

@@ -3794,8 +3893,8 @@ getObjectIdentityParts(const ObjectAddress *object,
conForm = (Form_pg_conversion) GETSTRUCT(conTup);
schema = get_namespace_name_or_temp(conForm->connamespace);
appendStringInfoString(&buffer,
quote_qualified_identifier(schema,
NameStr(conForm->conname)));
if (objname)
*objname = list_make2(schema,
pstrdup(NameStr(conForm->conname)));

@@ -3901,7 +4000,7 @@ getObjectIdentityParts(const ObjectAddress *object,
appendStringInfo(&buffer, "%s USING %s",
quote_qualified_identifier(schema,
NameStr(opcForm->opcname)),
quote_identifier(NameStr(amForm->amname)));
if (objname)
*objname = list_make3(pstrdup(NameStr(amForm->amname)),

@@ -3956,7 +4055,7 @@ getObjectIdentityParts(const ObjectAddress *object,
if (objname)
{
*objname = lappend(*objname,
psprintf("%d", amopForm->amopstrategy));
*objargs = list_make2(ltype, rtype);
}

@@ -4136,7 +4235,7 @@ getObjectIdentityParts(const ObjectAddress *object,
NameStr(formParser->prsname)));
if (objname)
*objname = list_make2(schema,
pstrdup(NameStr(formParser->prsname)));
ReleaseSysCache(tup);
break;
}

@@ -4159,7 +4258,7 @@ getObjectIdentityParts(const ObjectAddress *object,
NameStr(formDict->dictname)));
if (objname)
*objname = list_make2(schema,
pstrdup(NameStr(formDict->dictname)));
ReleaseSysCache(tup);
break;
}

@@ -4182,7 +4281,7 @@ getObjectIdentityParts(const ObjectAddress *object,
NameStr(formTmpl->tmplname)));
if (objname)
*objname = list_make2(schema,
pstrdup(NameStr(formTmpl->tmplname)));
ReleaseSysCache(tup);
break;
}

@@ -4510,10 +4609,10 @@ getRelationIdentity(StringInfo buffer, Oid relid, List **objname)
ArrayType *
strlist_to_textarray(List *list)
{
ArrayType *arr;
Datum *datums;
int j = 0;
ListCell *cell;
MemoryContext memcxt;
MemoryContext oldcxt;

@@ -4527,7 +4626,7 @@ strlist_to_textarray(List *list)
datums = palloc(sizeof(text *) * list_length(list));
foreach(cell, list)
{
char *name = lfirst(cell);
datums[j++] = CStringGetTextDatum(name);
}

|
|||
parameterModes, /* parameterModes */
|
||||
parameterNames, /* parameterNames */
|
||||
parameterDefaults, /* parameterDefaults */
|
||||
PointerGetDatum(NULL), /* trftypes */
|
||||
PointerGetDatum(NULL), /* trftypes */
|
||||
PointerGetDatum(NULL), /* proconfig */
|
||||
1, /* procost */
|
||||
0); /* prorows */
|
||||
|
|
|
@@ -346,7 +346,7 @@ restart:
if (!OidIsValid(binary_upgrade_next_pg_enum_oid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("pg_enum OID value not set when in binary upgrade mode")));
/*
* Use binary-upgrade override for pg_enum.oid, if supplied. During

@@ -1158,11 +1158,11 @@ fail:
List *
oid_array_to_list(Datum datum)
{
ArrayType *array = DatumGetArrayTypeP(datum);
Datum *values;
int nelems;
int i;
List *result = NIL;
deconstruct_array(array,
OIDOID,

@@ -133,7 +133,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace, Oid ownerId)
if (!OidIsValid(binary_upgrade_next_pg_type_oid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("pg_type OID value not set when in binary upgrade mode")));
HeapTupleSetOid(tup, binary_upgrade_next_pg_type_oid);
binary_upgrade_next_pg_type_oid = InvalidOid;

@@ -175,9 +175,9 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
/*
* Check to see whether the table needs a TOAST table.
*
* If an update-in-place TOAST relfilenode is specified, force TOAST
* file creation even if it seems not to need one. This handles the
* case where the old cluster needed a TOAST table but the new cluster
* would not normally create one.
*/

@@ -260,9 +260,9 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
namespaceid = PG_TOAST_NAMESPACE;
/*
* Use binary-upgrade override for pg_type.oid, if supplied. We might be
* in the post-schema-restore phase where we are doing ALTER TABLE to
* create TOAST tables that didn't exist in the old cluster.
*/
if (IsBinaryUpgrade && OidIsValid(binary_upgrade_next_toast_pg_type_oid))
{

@@ -2150,6 +2150,7 @@ compute_scalar_stats(VacAttrStatsP stats,
/* We always use the default collation for statistics */
ssup.ssup_collation = DEFAULT_COLLATION_OID;
ssup.ssup_nulls_first = false;

/*
* For now, don't perform abbreviated key conversion, because full values
* are required for MCV slot generation. Supporting that optimization

@@ -861,8 +861,8 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
* RLS (returns RLS_ENABLED) or not for this COPY statement.
*
* If the relation has a row security policy and we are to apply it
* then perform a "query" copy and allow the normal query processing
* to handle the policies.
*
* If RLS is not enabled for this, then just fall through to the
* normal non-filtering relation handling.

@@ -877,7 +877,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
if (is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("COPY FROM not supported with row level security."),
errhint("Use direct INSERT statements instead.")));
/* Build target list */

@@ -904,7 +904,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
select->targetList = list_make1(target);
select->fromClause = list_make1(from);
query = (Node *) select;
/* Close the handle to the relation as it is no longer needed. */
heap_close(rel, (is_from ? RowExclusiveLock : AccessShareLock));

@@ -1408,26 +1408,27 @@ BeginCopy(bool is_from,
/*
* If we were passed in a relid, make sure we got the same one back
* after planning out the query. It's possible that it changed
* between when we checked the policies on the table and decided to
* use a query and now.
*/
if (queryRelId != InvalidOid)
{
Oid relid = linitial_oid(plan->relationOids);
/*
* There should only be one relationOid in this case, since we
* will only get here when we have changed the command for the
* user from a "COPY relation TO" to "COPY (SELECT * FROM
* relation) TO", to allow row level security policies to be
* applied.
*/
Assert(list_length(plan->relationOids) == 1);
if (relid != queryRelId)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("relation referenced by COPY statement has changed")));
}

@@ -2439,7 +2440,7 @@ CopyFrom(CopyState cstate)
if (resultRelInfo->ri_NumIndices > 0)
recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
estate, false, NULL,
NIL);
/* AFTER ROW INSERT Triggers */

@@ -89,7 +89,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
if (stmt->if_not_exists)
{
Oid nspid;
nspid = RangeVarGetCreationNamespace(stmt->into->rel);

@@ -554,8 +554,8 @@ createdb(const CreatedbStmt *stmt)
* Force a checkpoint before starting the copy. This will force all dirty
* buffers, including those of unlogged tables, out to disk, to ensure
* source database is up-to-date on disk for the copy.
* FlushDatabaseBuffers() would suffice for that, but we also want to
* process any pending unlink requests. Otherwise, if a checkpoint
* happened while we're copying files, a file might be deleted just when
* we're about to copy it, causing the lstat() call in copydir() to fail
* with ENOENT.

@@ -841,8 +841,8 @@ dropdb(const char *dbname, bool missing_ok)
if (ReplicationSlotsCountDBSlots(db_id, &nslots, &nslots_active))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
errmsg("database \"%s\" is used by a logical replication slot",
dbname),
errdetail_plural("There is %d slot, %d of them active.",
"There are %d slots, %d of them active.",
nslots,

@@ -415,7 +415,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
break;
case OBJECT_OPCLASS:
{
List *opcname = list_copy_tail(objname, 1);
if (!schema_does_not_exist_skipping(opcname, &msg, &name))
{

@@ -427,7 +427,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
break;
case OBJECT_OPFAMILY:
{
List *opfname = list_copy_tail(objname, 1);
if (!schema_does_not_exist_skipping(opfname, &msg, &name))
{

@@ -57,13 +57,15 @@ typedef struct EventTriggerQueryState
bool in_sql_drop;
/* table_rewrite */
Oid table_rewrite_oid; /* InvalidOid, or set for table_rewrite event */
int table_rewrite_reason; /* AT_REWRITE reason */
/* Support for command collection */
bool commandCollectionInhibited;
CollectedCommand *currentCommand;
List *commandList; /* list of CollectedCommand; see deparse_utility.h */
struct EventTriggerQueryState *previous;
} EventTriggerQueryState;

@@ -143,7 +145,7 @@ static void AlterEventTriggerOwner_internal(Relation rel,
Oid newOwnerId);
static event_trigger_command_tag_check_result check_ddl_tag(const char *tag);
static event_trigger_command_tag_check_result check_table_rewrite_ddl_tag(
const char *tag);
static void error_duplicate_filter_variable(const char *defname);
static Datum filter_list_to_array(List *filterlist);
static Oid insert_event_trigger_tuple(char *trigname, char *eventname,

@@ -714,7 +716,7 @@ EventTriggerCommonSetup(Node *parsetree,
dbgtag = CreateCommandTag(parsetree);
if (event == EVT_DDLCommandStart ||
event == EVT_DDLCommandEnd ||
event == EVT_SQLDrop)
{
if (check_ddl_tag(dbgtag) != EVENT_TRIGGER_COMMAND_TAG_OK)

@@ -1562,8 +1564,8 @@ pg_event_trigger_table_rewrite_oid(PG_FUNCTION_ARGS)
currentEventTriggerState->table_rewrite_oid == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED),
errmsg("%s can only be called in a table_rewrite event trigger function",
"pg_event_trigger_table_rewrite_oid()")));
PG_RETURN_OID(currentEventTriggerState->table_rewrite_oid);
}

@@ -1583,8 +1585,8 @@ pg_event_trigger_table_rewrite_reason(PG_FUNCTION_ARGS)
currentEventTriggerState->table_rewrite_reason == 0)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED),
errmsg("%s can only be called in a table_rewrite event trigger function",
"pg_event_trigger_table_rewrite_reason()")));
PG_RETURN_INT32(currentEventTriggerState->table_rewrite_reason);
}

@@ -1672,7 +1674,7 @@ EventTriggerCollectSimpleCommand(ObjectAddress address,
command->parsetree = copyObject(parsetree);
currentEventTriggerState->commandList = lappend(currentEventTriggerState->commandList,
command);
MemoryContextSwitchTo(oldcxt);
}

@@ -1687,13 +1689,13 @@ EventTriggerCollectSimpleCommand(ObjectAddress address,
*
* XXX -- this API isn't considering the possibility of an ALTER TABLE command
* being called reentrantly by an event trigger function. Do we need stackable
* commands at this level? Perhaps at least we should detect the condition and
* raise an error.
*/
void
EventTriggerAlterTableStart(Node *parsetree)
{
MemoryContext oldcxt;
CollectedCommand *command;
/* ignore if event trigger context not set, or collection disabled */

@@ -1744,7 +1746,7 @@ EventTriggerAlterTableRelid(Oid objectId)
void
EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address)
{
MemoryContext oldcxt;
CollectedATSubcmd *newsub;
/* ignore if event trigger context not set, or collection disabled */

@@ -1808,8 +1810,8 @@ EventTriggerCollectGrant(InternalGrant *istmt)
{
MemoryContext oldcxt;
CollectedCommand *command;
InternalGrant *icopy;
ListCell *cell;
/* ignore if event trigger context not set, or collection disabled */
if (!currentEventTriggerState ||

@@ -1849,9 +1851,9 @@ EventTriggerCollectGrant(InternalGrant *istmt)
*/
void
EventTriggerCollectAlterOpFam(AlterOpFamilyStmt *stmt, Oid opfamoid,
List *operators, List *procedures)
{
MemoryContext oldcxt;
CollectedCommand *command;
/* ignore if event trigger context not set, or collection disabled */

@@ -1882,9 +1884,9 @@ EventTriggerCollectAlterOpFam(AlterOpFamilyStmt *stmt, Oid opfamoid,
*/
void
EventTriggerCollectCreateOpClass(CreateOpClassStmt *stmt, Oid opcoid,
List *operators, List *procedures)
{
MemoryContext oldcxt;
CollectedCommand *command;
/* ignore if event trigger context not set, or collection disabled */

@@ -1918,7 +1920,7 @@ void
EventTriggerCollectAlterTSConfig(AlterTSConfigurationStmt *stmt, Oid cfgId,
Oid *dictIds, int ndicts)
{
MemoryContext oldcxt;
CollectedCommand *command;
/* ignore if event trigger context not set, or collection disabled */

@@ -1952,7 +1954,7 @@ EventTriggerCollectAlterTSConfig(AlterTSConfigurationStmt *stmt, Oid cfgId,
void
EventTriggerCollectAlterDefPrivs(AlterDefaultPrivilegesStmt *stmt)
{
MemoryContext oldcxt;
CollectedCommand *command;
/* ignore if event trigger context not set, or collection disabled */

@@ -2034,10 +2036,10 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
* object, the returned OID is Invalid. Don't return anything.
*
* One might think that a viable alternative would be to look up the
* Oid of the existing object and run the deparse with that. But
* since the parse tree might be different from the one that created
* the object in the first place, we might not end up in a consistent
* state anyway.
*/
if (cmd->type == SCT_Simple &&
!OidIsValid(cmd->d.simple.address.objectId))

@@ -2074,10 +2076,10 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
identity = getObjectIdentity(&addr);
/*
* Obtain schema name, if any ("pg_temp" if a temp
* object). If the object class is not in the supported
* list here, we assume it's a schema-less object type,
* and thus "schema" remains set to NULL.
*/
if (is_objectclass_supported(addr.classId))
{

@@ -2099,10 +2101,10 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
addr.classId, addr.objectId);
schema_oid =
heap_getattr(objtup, nspAttnum,
RelationGetDescr(catalog), &isnull);
if (isnull)
elog(ERROR,
"invalid null namespace in object %u/%u/%d",
addr.classId, addr.objectId, addr.objectSubId);
/* XXX not quite get_namespace_name_or_temp */
if (isAnyTempNamespace(schema_oid))

@@ -2149,7 +2151,7 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
values[i++] = CStringGetTextDatum(CreateCommandTag(cmd->parsetree));
/* object_type */
values[i++] = CStringGetTextDatum(stringify_adefprivs_objtype(
cmd->d.defprivs.objtype));
/* schema */
nulls[i++] = true;
/* identity */

@@ -2172,7 +2174,7 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
"GRANT" : "REVOKE");
/* object_type */
values[i++] = CStringGetTextDatum(stringify_grantobjtype(
cmd->d.grant.istmt->objtype));
/* schema */
nulls[i++] = true;
/* identity */

@@ -2230,7 +2232,7 @@ stringify_grantobjtype(GrantObjectType objtype)
return "TYPE";
default:
elog(ERROR, "unrecognized type %d", objtype);
return "???"; /* keep compiler quiet */
}
}

@@ -2257,6 +2259,6 @@ stringify_adefprivs_objtype(GrantObjectType objtype)
break;
default:
elog(ERROR, "unrecognized type %d", objtype);
return "???"; /* keep compiler quiet */
}
}

@@ -83,11 +83,11 @@ static void show_merge_append_keys(MergeAppendState *mstate, List *ancestors,
static void show_agg_keys(AggState *astate, List *ancestors,
ExplainState *es);
static void show_grouping_sets(PlanState *planstate, Agg *agg,
List *ancestors, ExplainState *es);
static void show_grouping_set_keys(PlanState *planstate,
Agg *aggnode, Sort *sortnode,
List *context, bool useprefix,
List *ancestors, ExplainState *es);
static void show_group_keys(GroupState *gstate, List *ancestors,
ExplainState *es);
static void show_sort_group_keys(PlanState *planstate, const char *qlabel,

@@ -754,7 +754,7 @@ ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used)
((ModifyTable *) plan)->nominalRelation);
if (((ModifyTable *) plan)->exclRelRTI)
*rels_used = bms_add_member(*rels_used,
((ModifyTable *) plan)->exclRelRTI);
break;
default:
break;

@@ -984,6 +984,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
* quite messy.
*/
RangeTblEntry *rte;

rte = rt_fetch(((SampleScan *) plan)->scanrelid, es->rtable);
custom_name = get_tablesample_method_name(rte->tablesample->tsmid);
pname = psprintf("Sample Scan (%s)", custom_name);

@@ -1895,8 +1896,8 @@ show_grouping_sets(PlanState *planstate, Agg *agg,
foreach(lc, agg->chain)
{
Agg *aggnode = lfirst(lc);
Sort *sortnode = (Sort *) aggnode->plan.lefttree;
show_grouping_set_keys(planstate, aggnode, sortnode,
context, useprefix, ancestors, es);

@@ -2561,7 +2562,7 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors,
{
ExplainProperty("Conflict Resolution",
node->onConflictAction == ONCONFLICT_NOTHING ?
"NOTHING" : "UPDATE",
false, es);
/*

@@ -2582,9 +2583,9 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors,
/* EXPLAIN ANALYZE display of actual outcome for each tuple proposed */
if (es->analyze && mtstate->ps.instrument)
{
double total;
double insert_path;
double other_path;
InstrEndLoop(mtstate->mt_plans[0]->instrument);

@@ -921,9 +921,9 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
ReleaseSysCache(languageTuple);
/*
* Only superuser is allowed to create leakproof functions because
* leakproof functions can see tuples which have not yet been filtered out
* by security barrier views or row level security policies.
*/
if (isLeakProof && !superuser())
ereport(ERROR,

@@ -932,14 +932,15 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
if (transformDefElem)
{
ListCell *lc;
Assert(IsA(transformDefElem, List));
foreach(lc, (List *) transformDefElem)
{
Oid typeid = typenameTypeId(NULL, lfirst(lc));
Oid elt = get_base_element_type(typeid);
typeid = elt ? elt : typeid;
get_transform_oid(typeid, languageOid, false);

@@ -992,13 +993,13 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
if (list_length(trftypes_list) > 0)
{
ListCell *lc;
Datum *arr;
int i;
arr = palloc(list_length(trftypes_list) * sizeof(Datum));
i = 0;
foreach(lc, trftypes_list)
arr[i++] = ObjectIdGetDatum(lfirst_oid(lc));
trftypes = construct_array(arr, list_length(trftypes_list),
OIDOID, sizeof(Oid), true, 'i');

@@ -1716,7 +1717,7 @@ check_transform_function(Form_pg_proc procstruct)
if (procstruct->proisagg)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("transform function must not be an aggregate function")));
if (procstruct->proiswindow)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),

@@ -1867,9 +1868,9 @@ CreateTransform(CreateTransformStmt *stmt)
if (!stmt->replace)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("transform for type %s language \"%s\" already exists",
format_type_be(typeid),
stmt->lang)));
MemSet(replaces, false, sizeof(replaces));
replaces[Anum_pg_transform_trffromsql - 1] = true;

@@ -1958,9 +1959,9 @@ get_transform_oid(Oid type_id, Oid lang_id, bool missing_ok)
if (!OidIsValid(oid) && !missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("transform for type %s language \"%s\" does not exist",
format_type_be(type_id),
get_language_name(lang_id, false))));
return oid;
}

@@ -66,7 +66,7 @@ static char *make_temptable_name_n(char *tempname, int n);
static void mv_GenerateOper(StringInfo buf, Oid opoid);
static void refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
int save_sec_context);
static void refresh_by_heap_swap(Oid matviewOid, Oid OIDNewHeap, char relpersistence);
static void OpenMatViewIncrementalMaintenance(void);

@ -45,27 +45,27 @@
|
|||
#include "utils/syscache.h"
|
||||
|
||||
static void RangeVarCallbackForPolicy(const RangeVar *rv,
|
||||
Oid relid, Oid oldrelid, void *arg);
|
||||
Oid relid, Oid oldrelid, void *arg);
|
||||
static char parse_policy_command(const char *cmd_name);
|
||||
static ArrayType* policy_role_list_to_array(List *roles);
|
||||
static ArrayType *policy_role_list_to_array(List *roles);
|
||||
|
||||
/*
|
||||
* Callback to RangeVarGetRelidExtended().
|
||||
*
|
||||
* Checks the following:
|
||||
* - the relation specified is a table.
|
||||
* - current user owns the table.
|
||||
* - the table is not a system table.
|
||||
* - the relation specified is a table.
|
||||
* - current user owns the table.
|
||||
* - the table is not a system table.
|
||||
*
|
||||
* If any of these checks fails then an error is raised.
|
||||
*/
|
||||
static void
|
||||
RangeVarCallbackForPolicy(const RangeVar *rv, Oid relid, Oid oldrelid,
|
||||
void *arg)
|
||||
void *arg)
|
||||
{
|
||||
HeapTuple tuple;
|
||||
Form_pg_class classform;
|
||||
char relkind;
|
||||
HeapTuple tuple;
|
||||
Form_pg_class classform;
|
||||
char relkind;
|
||||
|
||||
tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
|
||||
if (!HeapTupleIsValid(tuple))
|
||||
|
@ -96,8 +96,8 @@ RangeVarCallbackForPolicy(const RangeVar *rv, Oid relid, Oid oldrelid,
|
|||
|
||||
/*
|
||||
* parse_policy_command -
|
||||
* helper function to convert full command strings to their char
|
||||
* representation.
|
||||
* helper function to convert full command strings to their char
|
||||
* representation.
|
||||
*
|
||||
* cmd_name - full string command name. Valid values are 'all', 'select',
|
||||
* 'insert', 'update' and 'delete'.
|
||||
|
@ -106,7 +106,7 @@ RangeVarCallbackForPolicy(const RangeVar *rv, Oid relid, Oid oldrelid,
|
|||
static char
|
||||
parse_policy_command(const char *cmd_name)
|
||||
{
|
||||
char cmd;
|
||||
char cmd;
|
||||
|
||||
if (!cmd_name)
|
||||
elog(ERROR, "unrecognized policy command");
|
||||
|
@ -129,7 +129,7 @@ parse_policy_command(const char *cmd_name)
|
|||
|
||||
/*
|
||||
* policy_role_list_to_array
|
||||
* helper function to convert a list of RoleSpecs to an array of role ids.
|
||||
* helper function to convert a list of RoleSpecs to an array of role ids.
|
||||
*/
|
||||
static ArrayType *
|
||||
policy_role_list_to_array(List *roles)
|
||||
|
@ -156,7 +156,7 @@ policy_role_list_to_array(List *roles)
|
|||
|
||||
foreach(cell, roles)
|
||||
{
|
||||
RoleSpec *spec = lfirst(cell);
|
||||
RoleSpec *spec = lfirst(cell);
|
||||
|
||||
/*
|
||||
* PUBLIC covers all roles, so it only makes sense alone.
|
||||
|
@ -167,7 +167,7 @@ policy_role_list_to_array(List *roles)
|
|||
ereport(WARNING,
|
||||
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
||||
errmsg("ignoring roles specified other than public"),
|
||||
errhint("All roles are members of the public role.")));
|
||||
errhint("All roles are members of the public role.")));
|
||||
temp_array[0] = ObjectIdGetDatum(ACL_ID_PUBLIC);
|
||||
num_roles = 1;
|
||||
break;
|
||||
|
@ -193,14 +193,14 @@ policy_role_list_to_array(List *roles)
|
|||
void
|
||||
RelationBuildRowSecurity(Relation relation)
|
||||
{
|
||||
MemoryContext rscxt;
|
||||
MemoryContext oldcxt = CurrentMemoryContext;
|
||||
RowSecurityDesc * volatile rsdesc = NULL;
|
||||
MemoryContext rscxt;
|
||||
MemoryContext oldcxt = CurrentMemoryContext;
|
||||
RowSecurityDesc *volatile rsdesc = NULL;
|
||||
|
||||
/*
|
||||
* Create a memory context to hold everything associated with this
|
||||
* relation's row security policy. This makes it easy to clean up
|
||||
* during a relcache flush.
|
||||
* relation's row security policy. This makes it easy to clean up during
|
||||
* a relcache flush.
|
||||
*/
|
||||
rscxt = AllocSetContextCreate(CacheMemoryContext,
|
||||
"row security descriptor",
|
||||
|
@ -209,15 +209,15 @@ RelationBuildRowSecurity(Relation relation)
|
|||
ALLOCSET_SMALL_MAXSIZE);
|
||||
|
||||
/*
|
||||
* Since rscxt lives under CacheMemoryContext, it is long-lived. Use
|
||||
* a PG_TRY block to ensure it'll get freed if we fail partway through.
|
||||
* Since rscxt lives under CacheMemoryContext, it is long-lived. Use a
|
||||
* PG_TRY block to ensure it'll get freed if we fail partway through.
|
||||
*/
|
||||
PG_TRY();
|
||||
{
|
||||
Relation catalog;
|
||||
ScanKeyData skey;
|
||||
SysScanDesc sscan;
|
||||
HeapTuple tuple;
|
||||
Relation catalog;
|
||||
ScanKeyData skey;
|
||||
SysScanDesc sscan;
|
||||
HeapTuple tuple;
|
||||
|
||||
rsdesc = MemoryContextAllocZero(rscxt, sizeof(RowSecurityDesc));
|
||||
rsdesc->rscxt = rscxt;
|
||||
|
@ -238,17 +238,17 @@ RelationBuildRowSecurity(Relation relation)
|
|||
*/
|
||||
while (HeapTupleIsValid(tuple = systable_getnext(sscan)))
|
||||
{
|
||||
Datum value_datum;
|
||||
char cmd_value;
|
||||
Datum roles_datum;
|
||||
char *qual_value;
|
||||
Expr *qual_expr;
|
||||
char *with_check_value;
|
||||
Expr *with_check_qual;
|
||||
char *policy_name_value;
|
||||
Oid policy_id;
|
||||
bool isnull;
|
||||
RowSecurityPolicy *policy;
|
||||
Datum value_datum;
|
||||
char cmd_value;
|
||||
Datum roles_datum;
|
||||
char *qual_value;
|
||||
Expr *qual_expr;
|
||||
char *with_check_value;
|
||||
Expr *with_check_qual;
|
||||
char *policy_name_value;
|
||||
Oid policy_id;
|
||||
bool isnull;
|
||||
RowSecurityPolicy *policy;
|
||||
|
||||
/*
|
||||
* Note: all the pass-by-reference data we collect here is either
|
||||
|
@ -259,26 +259,26 @@ RelationBuildRowSecurity(Relation relation)
|
|||
|
||||
/* Get policy command */
|
||||
value_datum = heap_getattr(tuple, Anum_pg_policy_polcmd,
|
||||
RelationGetDescr(catalog), &isnull);
|
||||
RelationGetDescr(catalog), &isnull);
|
||||
Assert(!isnull);
|
||||
cmd_value = DatumGetChar(value_datum);
|
||||
|
||||
/* Get policy name */
|
||||
value_datum = heap_getattr(tuple, Anum_pg_policy_polname,
|
||||
RelationGetDescr(catalog), &isnull);
|
||||
RelationGetDescr(catalog), &isnull);
|
||||
Assert(!isnull);
|
||||
policy_name_value = NameStr(*(DatumGetName(value_datum)));
|
||||
|
||||
/* Get policy roles */
|
||||
roles_datum = heap_getattr(tuple, Anum_pg_policy_polroles,
|
||||
RelationGetDescr(catalog), &isnull);
|
||||
RelationGetDescr(catalog), &isnull);
|
||||
/* shouldn't be null, but initdb doesn't mark it so, so check */
|
||||
if (isnull)
|
||||
elog(ERROR, "unexpected null value in pg_policy.polroles");
|
||||
|
||||
/* Get policy qual */
|
||||
value_datum = heap_getattr(tuple, Anum_pg_policy_polqual,
|
||||
RelationGetDescr(catalog), &isnull);
|
||||
RelationGetDescr(catalog), &isnull);
|
||||
if (!isnull)
|
||||
{
|
||||
qual_value = TextDatumGetCString(value_datum);
|
||||
|
@ -289,7 +289,7 @@ RelationBuildRowSecurity(Relation relation)
|
|||
|
||||
/* Get WITH CHECK qual */
|
||||
value_datum = heap_getattr(tuple, Anum_pg_policy_polwithcheck,
|
||||
RelationGetDescr(catalog), &isnull);
|
||||
RelationGetDescr(catalog), &isnull);
|
||||
if (!isnull)
|
||||
{
|
||||
with_check_value = TextDatumGetCString(value_datum);
|
||||
|
@ -311,7 +311,7 @@ RelationBuildRowSecurity(Relation relation)
|
|||
policy->qual = copyObject(qual_expr);
|
||||
policy->with_check_qual = copyObject(with_check_qual);
|
||||
policy->hassublinks = checkExprHasSubLink((Node *) qual_expr) ||
|
||||
checkExprHasSubLink((Node *) with_check_qual);
|
||||
checkExprHasSubLink((Node *) with_check_qual);
|
||||
|
||||
rsdesc->policies = lcons(policy, rsdesc->policies);
|
||||
|
||||
|
@ -330,15 +330,15 @@ RelationBuildRowSecurity(Relation relation)
|
|||
/*
|
||||
* Check if no policies were added
|
||||
*
|
||||
* If no policies exist in pg_policy for this relation, then we
|
||||
* need to create a single default-deny policy. We use InvalidOid for
|
||||
* the Oid to indicate that this is the default-deny policy (we may
|
||||
* decide to ignore the default policy if an extension adds policies).
|
||||
* If no policies exist in pg_policy for this relation, then we need
|
||||
* to create a single default-deny policy. We use InvalidOid for the
|
||||
* Oid to indicate that this is the default-deny policy (we may decide
|
||||
* to ignore the default policy if an extension adds policies).
|
||||
*/
|
||||
if (rsdesc->policies == NIL)
|
||||
{
|
||||
RowSecurityPolicy *policy;
|
||||
Datum role;
|
||||
RowSecurityPolicy *policy;
|
||||
Datum role;
|
||||
|
||||
MemoryContextSwitchTo(rscxt);
|
||||
|
||||
|
@ -351,7 +351,7 @@ RelationBuildRowSecurity(Relation relation)
|
|||
policy->roles = construct_array(&role, 1, OIDOID, sizeof(Oid), true,
|
||||
'i');
|
||||
policy->qual = (Expr *) makeConst(BOOLOID, -1, InvalidOid,
|
||||
sizeof(bool), BoolGetDatum(false),
|
||||
sizeof(bool), BoolGetDatum(false),
|
||||
false, true);
|
||||
policy->with_check_qual = copyObject(policy->qual);
|
||||
policy->hassublinks = false;
|
||||
|
@ -376,15 +376,15 @@ RelationBuildRowSecurity(Relation relation)
|
|||
|
||||
/*
|
||||
* RemovePolicyById -
|
||||
* remove a policy by its OID. If a policy does not exist with the provided
|
||||
* oid, then an error is raised.
|
||||
* remove a policy by its OID. If a policy does not exist with the provided
|
||||
* oid, then an error is raised.
|
||||
*
|
||||
* policy_id - the oid of the policy.
|
||||
*/
|
||||
void
|
||||
RemovePolicyById(Oid policy_id)
|
||||
{
|
||||
Relation pg_policy_rel;
|
||||
Relation pg_policy_rel;
|
||||
SysScanDesc sscan;
|
||||
ScanKeyData skey[1];
|
||||
HeapTuple tuple;
|
||||
|
@ -435,8 +435,8 @@ RemovePolicyById(Oid policy_id)
|
|||
|
||||
/*
|
||||
* Note that, unlike some of the other flags in pg_class, relrowsecurity
|
||||
* is not just an indication of if policies exist. When relrowsecurity
|
||||
* is set by a user, then all access to the relation must be through a
|
||||
* is not just an indication of if policies exist. When relrowsecurity is
|
||||
* set by a user, then all access to the relation must be through a
|
||||
* policy. If no policy is defined for the relation then a default-deny
|
||||
* policy is created and all records are filtered (except for queries from
|
||||
* the owner).
|
||||
|
@ -450,31 +450,31 @@ RemovePolicyById(Oid policy_id)
|
|||
|
||||
/*
|
||||
* CreatePolicy -
|
||||
* handles the execution of the CREATE POLICY command.
|
||||
* handles the execution of the CREATE POLICY command.
|
||||
*
|
||||
* stmt - the CreatePolicyStmt that describes the policy to create.
|
||||
*/
|
||||
ObjectAddress
|
||||
CreatePolicy(CreatePolicyStmt *stmt)
|
||||
{
|
||||
Relation pg_policy_rel;
|
||||
Oid policy_id;
|
||||
Relation target_table;
|
||||
Oid table_id;
|
||||
char polcmd;
|
||||
ArrayType *role_ids;
|
||||
ParseState *qual_pstate;
|
||||
ParseState *with_check_pstate;
|
||||
RangeTblEntry *rte;
|
||||
Node *qual;
|
||||
Node *with_check_qual;
|
||||
ScanKeyData skey[2];
|
||||
SysScanDesc sscan;
|
||||
HeapTuple policy_tuple;
|
||||
Datum values[Natts_pg_policy];
|
||||
bool isnull[Natts_pg_policy];
|
||||
ObjectAddress target;
|
||||
ObjectAddress myself;
|
||||
Relation pg_policy_rel;
|
||||
Oid policy_id;
|
||||
Relation target_table;
|
||||
Oid table_id;
|
||||
char polcmd;
|
||||
ArrayType *role_ids;
|
||||
ParseState *qual_pstate;
|
||||
ParseState *with_check_pstate;
|
||||
RangeTblEntry *rte;
|
||||
Node *qual;
|
||||
Node *with_check_qual;
|
||||
ScanKeyData skey[2];
|
||||
SysScanDesc sscan;
|
||||
HeapTuple policy_tuple;
|
||||
Datum values[Natts_pg_policy];
|
||||
bool isnull[Natts_pg_policy];
|
||||
ObjectAddress target;
|
||||
ObjectAddress myself;
|
||||
|
||||
/* Parse command */
|
||||
polcmd = parse_policy_command(stmt->cmd);
|
||||
|
@ -506,8 +506,8 @@ CreatePolicy(CreatePolicyStmt *stmt)
|
|||
with_check_pstate = make_parsestate(NULL);
|
||||
|
||||
/* zero-clear */
|
||||
memset(values, 0, sizeof(values));
|
||||
memset(isnull, 0, sizeof(isnull));
|
||||
memset(values, 0, sizeof(values));
|
||||
memset(isnull, 0, sizeof(isnull));
|
||||
|
||||
/* Get id of table. Also handles permissions checks. */
|
||||
table_id = RangeVarGetRelidExtended(stmt->table, AccessExclusiveLock,
|
||||
|
@ -515,7 +515,7 @@ CreatePolicy(CreatePolicyStmt *stmt)
|
|||
RangeVarCallbackForPolicy,
|
||||
(void *) stmt);
|
||||
|
||||
/* Open target_table to build quals. No lock is necessary.*/
|
||||
/* Open target_table to build quals. No lock is necessary. */
|
||||
target_table = relation_open(table_id, NoLock);
|
||||
|
||||
/* Add for the regular security quals */
|
||||
|
@ -534,9 +534,9 @@ CreatePolicy(CreatePolicyStmt *stmt)
|
|||
"POLICY");
|
||||
|
||||
with_check_qual = transformWhereClause(with_check_pstate,
|
||||
copyObject(stmt->with_check),
|
||||
EXPR_KIND_WHERE,
|
||||
"POLICY");
|
||||
copyObject(stmt->with_check),
|
||||
EXPR_KIND_WHERE,
|
||||
"POLICY");
|
||||
|
||||
/* Open pg_policy catalog */
|
||||
pg_policy_rel = heap_open(PolicyRelationId, RowExclusiveLock);
|
||||
|
@ -568,7 +568,7 @@ CreatePolicy(CreatePolicyStmt *stmt)
|
|||
|
||||
values[Anum_pg_policy_polrelid - 1] = ObjectIdGetDatum(table_id);
|
||||
values[Anum_pg_policy_polname - 1] = DirectFunctionCall1(namein,
|
||||
CStringGetDatum(stmt->policy_name));
|
||||
CStringGetDatum(stmt->policy_name));
|
||||
values[Anum_pg_policy_polcmd - 1] = CharGetDatum(polcmd);
|
||||
values[Anum_pg_policy_polroles - 1] = PointerGetDatum(role_ids);
|
||||
|
||||
|
@ -625,34 +625,34 @@ CreatePolicy(CreatePolicyStmt *stmt)
|
|||
|
||||
/*
|
||||
* AlterPolicy -
|
||||
* handles the execution of the ALTER POLICY command.
|
||||
* handles the execution of the ALTER POLICY command.
|
||||
*
|
||||
* stmt - the AlterPolicyStmt that describes the policy and how to alter it.
|
||||
*/
|
||||
ObjectAddress
|
||||
AlterPolicy(AlterPolicyStmt *stmt)
|
||||
{
|
||||
Relation pg_policy_rel;
|
||||
Oid policy_id;
|
||||
Relation target_table;
|
||||
Oid table_id;
|
||||
ArrayType *role_ids = NULL;
|
||||
List *qual_parse_rtable = NIL;
|
||||
List *with_check_parse_rtable = NIL;
|
||||
Node *qual = NULL;
|
||||
Node *with_check_qual = NULL;
|
||||
ScanKeyData skey[2];
|
||||
SysScanDesc sscan;
|
||||
HeapTuple policy_tuple;
|
||||
HeapTuple new_tuple;
|
||||
Datum values[Natts_pg_policy];
|
||||
bool isnull[Natts_pg_policy];
|
||||
bool replaces[Natts_pg_policy];
|
||||
ObjectAddress target;
|
||||
ObjectAddress myself;
|
||||
Datum cmd_datum;
|
||||
char polcmd;
|
||||
bool polcmd_isnull;
|
||||
Relation pg_policy_rel;
|
||||
Oid policy_id;
|
||||
Relation target_table;
|
||||
Oid table_id;
|
||||
ArrayType *role_ids = NULL;
|
||||
List *qual_parse_rtable = NIL;
|
||||
List *with_check_parse_rtable = NIL;
|
||||
Node *qual = NULL;
|
||||
Node *with_check_qual = NULL;
|
||||
ScanKeyData skey[2];
|
||||
SysScanDesc sscan;
|
||||
HeapTuple policy_tuple;
|
||||
HeapTuple new_tuple;
|
||||
Datum values[Natts_pg_policy];
|
||||
bool isnull[Natts_pg_policy];
|
||||
bool replaces[Natts_pg_policy];
|
||||
ObjectAddress target;
|
||||
ObjectAddress myself;
|
||||
Datum cmd_datum;
|
||||
char polcmd;
|
||||
bool polcmd_isnull;
|
||||
|
||||
/* Parse role_ids */
|
||||
if (stmt->roles != NULL)
|
||||
|
@ -669,8 +669,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
|
|||
/* Parse the using policy clause */
|
||||
if (stmt->qual)
|
||||
{
|
||||
RangeTblEntry *rte;
|
||||
ParseState *qual_pstate = make_parsestate(NULL);
|
||||
RangeTblEntry *rte;
|
||||
ParseState *qual_pstate = make_parsestate(NULL);
|
||||
|
||||
rte = addRangeTableEntryForRelation(qual_pstate, target_table,
|
||||
NULL, false, false);
|
||||
|
@ -688,8 +688,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
|
|||
/* Parse the with-check policy clause */
|
||||
if (stmt->with_check)
|
||||
{
|
||||
RangeTblEntry *rte;
|
||||
ParseState *with_check_pstate = make_parsestate(NULL);
|
||||
RangeTblEntry *rte;
|
||||
ParseState *with_check_pstate = make_parsestate(NULL);
|
||||
|
||||
rte = addRangeTableEntryForRelation(with_check_pstate, target_table,
|
||||
NULL, false, false);
|
||||
|
@ -706,9 +706,9 @@ AlterPolicy(AlterPolicyStmt *stmt)
|
|||
}
|
||||
|
||||
/* zero-clear */
|
||||
memset(values, 0, sizeof(values));
|
||||
memset(values, 0, sizeof(values));
|
||||
memset(replaces, 0, sizeof(replaces));
|
||||
memset(isnull, 0, sizeof(isnull));
|
||||
memset(isnull, 0, sizeof(isnull));
|
||||
|
||||
/* Find policy to update. */
|
||||
pg_policy_rel = heap_open(PolicyRelationId, RowExclusiveLock);
|
||||
|
@ -756,8 +756,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
errmsg("only USING expression allowed for SELECT, DELETE")));

/*
* If the command is INSERT then WITH CHECK should be the only
* expression provided.
* If the command is INSERT then WITH CHECK should be the only expression
* provided.
*/
if ((polcmd == ACL_INSERT_CHR)
&& stmt->qual != NULL)
@ -829,19 +829,19 @@ AlterPolicy(AlterPolicyStmt *stmt)

/*
* rename_policy -
* change the name of a policy on a relation
* change the name of a policy on a relation
*/
ObjectAddress
rename_policy(RenameStmt *stmt)
{
Relation pg_policy_rel;
Relation target_table;
Oid table_id;
Oid opoloid;
ScanKeyData skey[2];
SysScanDesc sscan;
HeapTuple policy_tuple;
ObjectAddress address;
Relation pg_policy_rel;
Relation target_table;
Oid table_id;
Oid opoloid;
ScanKeyData skey[2];
SysScanDesc sscan;
HeapTuple policy_tuple;
ObjectAddress address;

/* Get id of table. Also handles permissions checks. */
table_id = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
@ -875,7 +875,7 @@ rename_policy(RenameStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("policy \"%s\" for table \"%s\" already exists",
stmt->newname, RelationGetRelationName(target_table))));
stmt->newname, RelationGetRelationName(target_table))));

systable_endscan(sscan);

@ -903,7 +903,7 @@ rename_policy(RenameStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("policy \"%s\" for table \"%s\" does not exist",
stmt->subname, RelationGetRelationName(target_table))));
stmt->subname, RelationGetRelationName(target_table))));

opoloid = HeapTupleGetOid(policy_tuple);

@ -923,9 +923,9 @@ rename_policy(RenameStmt *stmt)
ObjectAddressSet(address, PolicyRelationId, opoloid);

/*
* Invalidate relation's relcache entry so that other backends (and
* this one too!) are sent SI message to make them rebuild relcache
* entries. (Ideally this should happen automatically...)
* Invalidate relation's relcache entry so that other backends (and this
* one too!) are sent SI message to make them rebuild relcache entries.
* (Ideally this should happen automatically...)
*/
CacheInvalidateRelcache(target_table);
@ -946,11 +946,11 @@ rename_policy(RenameStmt *stmt)
Oid
get_relation_policy_oid(Oid relid, const char *policy_name, bool missing_ok)
{
Relation pg_policy_rel;
ScanKeyData skey[2];
SysScanDesc sscan;
HeapTuple policy_tuple;
Oid policy_oid;
Relation pg_policy_rel;
ScanKeyData skey[2];
SysScanDesc sscan;
HeapTuple policy_tuple;
Oid policy_oid;

pg_policy_rel = heap_open(PolicyRelationId, AccessShareLock);
@ -44,7 +44,7 @@ static void AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerI
Oid
CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
{
const char *schemaName = stmt->schemaname;
const char *schemaName = stmt->schemaname;
Oid namespaceId;
OverrideSearchPath *overridePath;
List *parsetree_list;
@ -68,7 +68,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
/* fill schema name with the user name if not specified */
if (!schemaName)
{
HeapTuple tuple;
HeapTuple tuple;

tuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(owner_uid));
if (!HeapTupleIsValid(tuple))
@ -566,8 +566,8 @@ nextval_internal(Oid relid)
PreventCommandIfReadOnly("nextval()");

/*
* Forbid this during parallel operation because, to make it work,
* the cooperating backends would need to share the backend-local cached
* Forbid this during parallel operation because, to make it work, the
* cooperating backends would need to share the backend-local cached
* sequence information. Currently, we don't support that.
*/
PreventCommandIfParallelMode("nextval()");
@ -702,10 +702,10 @@ nextval_internal(Oid relid)

/*
* If something needs to be WAL logged, acquire an xid, so this
* transaction's commit will trigger a WAL flush and wait for
* syncrep. It's sufficient to ensure the toplevel transaction has an xid,
* no need to assign xids subxacts, that'll already trigger an appropriate
* wait. (Have to do that here, so we're outside the critical section)
* transaction's commit will trigger a WAL flush and wait for syncrep.
* It's sufficient to ensure the toplevel transaction has an xid, no need
* to assign xids subxacts, that'll already trigger an appropriate wait.
* (Have to do that here, so we're outside the critical section)
*/
if (logit && RelationNeedsWAL(seqrel))
GetTopTransactionId();
@ -870,8 +870,8 @@ do_setval(Oid relid, int64 next, bool iscalled)
PreventCommandIfReadOnly("setval()");

/*
* Forbid this during parallel operation because, to make it work,
* the cooperating backends would need to share the backend-local cached
* Forbid this during parallel operation because, to make it work, the
* cooperating backends would need to share the backend-local cached
* sequence information. Currently, we don't support that.
*/
PreventCommandIfParallelMode("setval()");
@ -306,14 +306,14 @@ static void createForeignKeyTriggers(Relation rel, Oid refRelOid,
|
|||
Constraint *fkconstraint,
|
||||
Oid constraintOid, Oid indexOid);
|
||||
static void ATController(AlterTableStmt *parsetree,
|
||||
Relation rel, List *cmds, bool recurse, LOCKMODE lockmode);
|
||||
Relation rel, List *cmds, bool recurse, LOCKMODE lockmode);
|
||||
static void ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
|
||||
bool recurse, bool recursing, LOCKMODE lockmode);
|
||||
static void ATRewriteCatalogs(List **wqueue, LOCKMODE lockmode);
|
||||
static void ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
|
||||
AlterTableCmd *cmd, LOCKMODE lockmode);
|
||||
static void ATRewriteTables(AlterTableStmt *parsetree,
|
||||
List **wqueue, LOCKMODE lockmode);
|
||||
List **wqueue, LOCKMODE lockmode);
|
||||
static void ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode);
|
||||
static AlteredTableInfo *ATGetQueueEntry(List **wqueue, Relation rel);
|
||||
static void ATSimplePermissions(Relation rel, int allowed_targets);
|
||||
|
@ -631,7 +631,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
|
|||
|
||||
cooked = (CookedConstraint *) palloc(sizeof(CookedConstraint));
|
||||
cooked->contype = CONSTR_DEFAULT;
|
||||
cooked->conoid = InvalidOid; /* until created */
|
||||
cooked->conoid = InvalidOid; /* until created */
|
||||
cooked->name = NULL;
|
||||
cooked->attnum = attnum;
|
||||
cooked->expr = colDef->cooked_default;
|
||||
|
@ -1751,7 +1751,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
|
|||
|
||||
cooked = (CookedConstraint *) palloc(sizeof(CookedConstraint));
|
||||
cooked->contype = CONSTR_CHECK;
|
||||
cooked->conoid = InvalidOid; /* until created */
|
||||
cooked->conoid = InvalidOid; /* until created */
|
||||
cooked->name = pstrdup(name);
|
||||
cooked->attnum = 0; /* not used for constraints */
|
||||
cooked->expr = expr;
|
||||
|
@ -1781,7 +1781,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
|
|||
*/
|
||||
if (inhSchema != NIL)
|
||||
{
|
||||
int schema_attno = 0;
|
||||
int schema_attno = 0;
|
||||
|
||||
foreach(entry, schema)
|
||||
{
|
||||
|
@ -1809,14 +1809,14 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
|
|||
* Yes, try to merge the two column definitions. They must
|
||||
* have the same type, typmod, and collation.
|
||||
*/
|
||||
if (exist_attno == schema_attno)
|
||||
if (exist_attno == schema_attno)
|
||||
ereport(NOTICE,
|
||||
(errmsg("merging column \"%s\" with inherited definition",
|
||||
attributeName)));
|
||||
(errmsg("merging column \"%s\" with inherited definition",
|
||||
attributeName)));
|
||||
else
|
||||
ereport(NOTICE,
|
||||
(errmsg("moving and merging column \"%s\" with inherited definition", attributeName),
|
||||
errdetail("User-specified column moved to the position of the inherited column.")));
|
||||
(errmsg("moving and merging column \"%s\" with inherited definition", attributeName),
|
||||
errdetail("User-specified column moved to the position of the inherited column.")));
|
||||
def = (ColumnDef *) list_nth(inhSchema, exist_attno - 1);
|
||||
typenameTypeIdAndMod(NULL, def->typeName, &defTypeId, &deftypmod);
|
||||
typenameTypeIdAndMod(NULL, newdef->typeName, &newTypeId, &newtypmod);
|
||||
|
@ -3496,7 +3496,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
|
|||
break;
|
||||
case AT_ReAddIndex: /* ADD INDEX */
|
||||
address = ATExecAddIndex(tab, rel, (IndexStmt *) cmd->def, true,
|
||||
lockmode);
|
||||
lockmode);
|
||||
break;
|
||||
case AT_AddConstraint: /* ADD CONSTRAINT */
|
||||
address =
|
||||
|
@ -3803,7 +3803,7 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode)
|
|||
* And fire it only once.
|
||||
*/
|
||||
if (parsetree)
|
||||
EventTriggerTableRewrite((Node *)parsetree,
|
||||
EventTriggerTableRewrite((Node *) parsetree,
|
||||
tab->relid,
|
||||
tab->rewrite);
|
||||
|
||||
|
@ -5960,7 +5960,7 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
|
|||
true, /* update pg_index */
|
||||
true, /* remove old dependencies */
|
||||
allowSystemTableMods,
|
||||
false); /* is_internal */
|
||||
false); /* is_internal */
|
||||
|
||||
index_close(indexRel, NoLock);
|
||||
|
||||
|
@ -6906,7 +6906,7 @@ ATExecValidateConstraint(Relation rel, char *constrName, bool recurse,
|
|||
HeapTupleGetOid(tuple));
|
||||
}
|
||||
else
|
||||
address = InvalidObjectAddress; /* already validated */
|
||||
address = InvalidObjectAddress; /* already validated */
|
||||
|
||||
systable_endscan(scan);
|
||||
|
||||
|
@ -7866,11 +7866,12 @@ ATPrepAlterColumnType(List **wqueue,
|
|||
{
|
||||
/*
|
||||
* Set up an expression to transform the old data value to the new
|
||||
* type. If a USING option was given, use the expression as transformed
|
||||
* by transformAlterTableStmt, else just take the old value and try to
|
||||
* coerce it. We do this first so that type incompatibility can be
|
||||
* detected before we waste effort, and because we need the expression
|
||||
* to be parsed against the original table row type.
|
||||
* type. If a USING option was given, use the expression as
|
||||
* transformed by transformAlterTableStmt, else just take the old
|
||||
* value and try to coerce it. We do this first so that type
|
||||
* incompatibility can be detected before we waste effort, and because
|
||||
* we need the expression to be parsed against the original table row
|
||||
* type.
|
||||
*/
|
||||
if (!transform)
|
||||
{
|
||||
|
@ -8221,8 +8222,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
|
|||
* specified in the policy's USING or WITH CHECK qual
|
||||
* expressions. It might be possible to rewrite and recheck
|
||||
* the policy expression, but punt for now. It's certainly
|
||||
* easy enough to remove and recreate the policy; still,
|
||||
* FIXME someday.
|
||||
* easy enough to remove and recreate the policy; still, FIXME
|
||||
* someday.
|
||||
*/
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
|
@ -9701,9 +9702,9 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt)
|
|||
!ConditionalLockRelationOid(relOid, AccessExclusiveLock))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_OBJECT_IN_USE),
|
||||
errmsg("aborting because lock on relation \"%s\".\"%s\" is not available",
|
||||
get_namespace_name(relForm->relnamespace),
|
||||
NameStr(relForm->relname))));
|
||||
errmsg("aborting because lock on relation \"%s\".\"%s\" is not available",
|
||||
get_namespace_name(relForm->relnamespace),
|
||||
NameStr(relForm->relname))));
|
||||
else
|
||||
LockRelationOid(relOid, AccessExclusiveLock);
|
||||
|
||||
|
@ -10923,9 +10924,9 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode
|
|||
static void
|
||||
ATExecEnableRowSecurity(Relation rel)
|
||||
{
|
||||
Relation pg_class;
|
||||
Oid relid;
|
||||
HeapTuple tuple;
|
||||
Relation pg_class;
|
||||
Oid relid;
|
||||
HeapTuple tuple;
|
||||
|
||||
relid = RelationGetRelid(rel);
|
||||
|
||||
|
@ -10949,9 +10950,9 @@ ATExecEnableRowSecurity(Relation rel)
|
|||
static void
|
||||
ATExecDisableRowSecurity(Relation rel)
|
||||
{
|
||||
Relation pg_class;
|
||||
Oid relid;
|
||||
HeapTuple tuple;
|
||||
Relation pg_class;
|
||||
Oid relid;
|
||||
HeapTuple tuple;
|
||||
|
||||
relid = RelationGetRelid(rel);
|
|
|
|
@ -4329,7 +4329,7 @@ AfterTriggerEndSubXact(bool isCommit)
|
|||
static void
|
||||
AfterTriggerEnlargeQueryState(void)
|
||||
{
|
||||
int init_depth = afterTriggers.maxquerydepth;
|
||||
int init_depth = afterTriggers.maxquerydepth;
|
||||
|
||||
Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
|
||||
|
||||
|
@ -4396,7 +4396,7 @@ SetConstraintStateCreate(int numalloc)
|
|||
state = (SetConstraintState)
|
||||
MemoryContextAllocZero(TopTransactionContext,
|
||||
offsetof(SetConstraintStateData, trigstates) +
|
||||
numalloc * sizeof(SetConstraintTriggerData));
|
||||
numalloc * sizeof(SetConstraintTriggerData));
|
||||
|
||||
state->numalloc = numalloc;
|
||||
|
||||
|
|
|
@ -575,13 +575,13 @@ DefineType(List *names, List *parameters)
|
|||
if (typmodinOid && func_volatile(typmodinOid) == PROVOLATILE_VOLATILE)
|
||||
ereport(WARNING,
|
||||
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
|
||||
errmsg("type modifier input function %s should not be volatile",
|
||||
NameListToString(typmodinName))));
|
||||
errmsg("type modifier input function %s should not be volatile",
|
||||
NameListToString(typmodinName))));
|
||||
if (typmodoutOid && func_volatile(typmodoutOid) == PROVOLATILE_VOLATILE)
|
||||
ereport(WARNING,
|
||||
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
|
||||
errmsg("type modifier output function %s should not be volatile",
|
||||
NameListToString(typmodoutName))));
|
||||
errmsg("type modifier output function %s should not be volatile",
|
||||
NameListToString(typmodoutName))));
|
||||
|
||||
/*
|
||||
* OK, we're done checking, time to make the type. We must assign the
|
||||
|
@ -643,32 +643,32 @@ DefineType(List *names, List *parameters)
|
|||
array_type, /* type name */
|
||||
typeNamespace, /* namespace */
|
||||
InvalidOid, /* relation oid (n/a here) */
|
||||
0, /* relation kind (ditto) */
|
||||
GetUserId(), /* owner's ID */
|
||||
-1, /* internal size (always varlena) */
|
||||
0, /* relation kind (ditto) */
|
||||
GetUserId(), /* owner's ID */
|
||||
-1, /* internal size (always varlena) */
|
||||
TYPTYPE_BASE, /* type-type (base type) */
|
||||
TYPCATEGORY_ARRAY, /* type-category (array) */
|
||||
false, /* array types are never preferred */
|
||||
false, /* array types are never preferred */
|
||||
delimiter, /* array element delimiter */
|
||||
F_ARRAY_IN, /* input procedure */
|
||||
F_ARRAY_OUT, /* output procedure */
|
||||
F_ARRAY_OUT, /* output procedure */
|
||||
F_ARRAY_RECV, /* receive procedure */
|
||||
F_ARRAY_SEND, /* send procedure */
|
||||
typmodinOid, /* typmodin procedure */
|
||||
typmodinOid, /* typmodin procedure */
|
||||
typmodoutOid, /* typmodout procedure */
|
||||
F_ARRAY_TYPANALYZE, /* analyze procedure */
|
||||
typoid, /* element type ID */
|
||||
true, /* yes this is an array type */
|
||||
typoid, /* element type ID */
|
||||
true, /* yes this is an array type */
|
||||
InvalidOid, /* no further array type */
|
||||
InvalidOid, /* base type ID */
|
||||
NULL, /* never a default type value */
|
||||
NULL, /* binary default isn't sent either */
|
||||
false, /* never passed by value */
|
||||
NULL, /* never a default type value */
|
||||
NULL, /* binary default isn't sent either */
|
||||
false, /* never passed by value */
|
||||
alignment, /* see above */
|
||||
'x', /* ARRAY is always toastable */
|
||||
-1, /* typMod (Domains only) */
|
||||
0, /* Array dimensions of typbasetype */
|
||||
false, /* Type NOT NULL */
|
||||
'x', /* ARRAY is always toastable */
|
||||
-1, /* typMod (Domains only) */
|
||||
0, /* Array dimensions of typbasetype */
|
||||
false, /* Type NOT NULL */
|
||||
collation); /* type's collation */
|
||||
|
||||
pfree(array_type);
|
||||
|
@ -1616,7 +1616,7 @@ makeRangeConstructors(const char *name, Oid namespace,
|
|||
PointerGetDatum(NULL), /* parameterModes */
|
||||
PointerGetDatum(NULL), /* parameterNames */
|
||||
NIL, /* parameterDefaults */
|
||||
PointerGetDatum(NULL), /* trftypes */
|
||||
PointerGetDatum(NULL), /* trftypes */
|
||||
PointerGetDatum(NULL), /* proconfig */
|
||||
1.0, /* procost */
|
||||
0.0); /* prorows */
|
|
@ -87,7 +87,8 @@ CreateRole(CreateRoleStmt *stmt)
|
|||
bool createdb = false; /* Can the user create databases? */
|
||||
bool canlogin = false; /* Can this user login? */
|
||||
bool isreplication = false; /* Is this a replication role? */
|
||||
bool bypassrls = false; /* Is this a row security enabled role? */
|
||||
bool bypassrls = false; /* Is this a row security enabled
|
||||
* role? */
|
||||
int connlimit = -1; /* maximum connections allowed */
|
||||
List *addroleto = NIL; /* roles to make this a member of */
|
||||
List *rolemembers = NIL; /* roles to be members of this role */
|
||||
|
@ -300,7 +301,7 @@ CreateRole(CreateRoleStmt *stmt)
|
|||
if (!superuser())
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
|
||||
errmsg("must be superuser to change bypassrls attribute.")));
|
||||
errmsg("must be superuser to change bypassrls attribute.")));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -681,7 +682,7 @@ AlterRole(AlterRoleStmt *stmt)
|
|||
if (!superuser())
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
|
||||
errmsg("must be superuser to change bypassrls attribute")));
|
||||
errmsg("must be superuser to change bypassrls attribute")));
|
||||
}
|
||||
else if (!have_createrole_privilege())
|
||||
{
|
||||
|
@ -721,11 +722,11 @@ AlterRole(AlterRoleStmt *stmt)
|
|||
* Call the password checking hook if there is one defined
|
||||
*/
|
||||
if (check_password_hook && password)
|
||||
(*check_password_hook)(rolename ,
|
||||
password,
|
||||
isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
|
||||
validUntil_datum,
|
||||
validUntil_null);
|
||||
(*check_password_hook) (rolename,
|
||||
password,
|
||||
isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
|
||||
validUntil_datum,
|
||||
validUntil_null);
|
||||
|
||||
/*
|
||||
* Build an updated tuple, perusing the information just obtained
|
||||
|
@ -1358,8 +1359,8 @@ roleSpecsToIds(List *memberNames)
|
|||
|
||||
foreach(l, memberNames)
|
||||
{
|
||||
Node *rolespec = (Node *) lfirst(l);
|
||||
Oid roleid;
|
||||
Node *rolespec = (Node *) lfirst(l);
|
||||
Oid roleid;
|
||||
|
||||
roleid = get_rolespec_oid(rolespec, false);
|
||||
result = lappend_oid(result, roleid);
|
||||
|
@ -1455,7 +1456,7 @@ AddRoleMems(const char *rolename, Oid roleid,
|
|||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
|
||||
(errmsg("role \"%s\" is a member of role \"%s\"",
|
||||
rolename, get_rolespec_name((Node *) memberRole)))));
|
||||
rolename, get_rolespec_name((Node *) memberRole)))));
|
||||
|
||||
/*
|
||||
* Check if entry for this role/member already exists; if so, give
|
||||
|
@ -1470,7 +1471,7 @@ AddRoleMems(const char *rolename, Oid roleid,
|
|||
{
|
||||
ereport(NOTICE,
|
||||
(errmsg("role \"%s\" is already a member of role \"%s\"",
|
||||
get_rolespec_name((Node *) memberRole), rolename)));
|
||||
get_rolespec_name((Node *) memberRole), rolename)));
|
||||
ReleaseSysCache(authmem_tuple);
|
||||
continue;
|
||||
}
|
||||
|
@ -1581,7 +1582,7 @@ DelRoleMems(const char *rolename, Oid roleid,
|
|||
{
|
||||
ereport(WARNING,
|
||||
(errmsg("role \"%s\" is not a member of role \"%s\"",
|
||||
get_rolespec_name((Node *) memberRole), rolename)));
|
||||
get_rolespec_name((Node *) memberRole), rolename)));
|
||||
continue;
|
||||
}
|
|
@ -83,7 +83,7 @@ static bool vacuum_rel(Oid relid, RangeVar *relation, int options,
|
|||
void
|
||||
ExecVacuum(VacuumStmt *vacstmt, bool isTopLevel)
|
||||
{
|
||||
VacuumParams params;
|
||||
VacuumParams params;
|
||||
|
||||
/* sanity checks on options */
|
||||
Assert(vacstmt->options & (VACOPT_VACUUM | VACOPT_ANALYZE));
|
||||
|
@ -530,8 +530,8 @@ vacuum_set_xid_limits(Relation rel,
|
|||
|
||||
/*
|
||||
* Compute the multixact age for which freezing is urgent. This is
|
||||
* normally autovacuum_multixact_freeze_max_age, but may be less if we
|
||||
* are short of multixact member space.
|
||||
* normally autovacuum_multixact_freeze_max_age, but may be less if we are
|
||||
* short of multixact member space.
|
||||
*/
|
||||
effective_multixact_freeze_max_age = MultiXactMemberFreezeThreshold();
|
||||
|
||||
|
@ -1134,9 +1134,8 @@ vac_truncate_clog(TransactionId frozenXID,
|
|||
return;
|
||||
|
||||
/*
|
||||
* Truncate CLOG and CommitTs to the oldest computed value.
|
||||
* Note we don't truncate multixacts; that will be done by the next
|
||||
* checkpoint.
|
||||
* Truncate CLOG and CommitTs to the oldest computed value. Note we don't
|
||||
* truncate multixacts; that will be done by the next checkpoint.
|
||||
*/
|
||||
TruncateCLOG(frozenXID);
|
||||
TruncateCommitTs(frozenXID, true);
|
|
@ -105,7 +105,7 @@ typedef struct LVRelStats
|
|||
BlockNumber old_rel_pages; /* previous value of pg_class.relpages */
|
||||
BlockNumber rel_pages; /* total number of pages */
|
||||
BlockNumber scanned_pages; /* number of pages we examined */
|
||||
BlockNumber pinskipped_pages; /* # of pages we skipped due to a pin */
|
||||
BlockNumber pinskipped_pages; /* # of pages we skipped due to a pin */
|
||||
double scanned_tuples; /* counts only tuples on scanned pages */
|
||||
double old_rel_tuples; /* previous value of pg_class.reltuples */
|
||||
double new_rel_tuples; /* new estimated total # of tuples */
|
||||
|
@ -336,7 +336,8 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
|
|||
TimestampDifferenceExceeds(starttime, endtime,
|
||||
params->log_min_duration))
|
||||
{
|
||||
StringInfoData buf;
|
||||
StringInfoData buf;
|
||||
|
||||
TimestampDifference(starttime, endtime, &secs, &usecs);
|
||||
|
||||
read_rate = 0;
|
||||
|
@ -369,7 +370,7 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
|
|||
vacrelstats->new_rel_tuples,
|
||||
vacrelstats->new_dead_tuples);
|
||||
appendStringInfo(&buf,
|
||||
_("buffer usage: %d hits, %d misses, %d dirtied\n"),
|
||||
_("buffer usage: %d hits, %d misses, %d dirtied\n"),
|
||||
VacuumPageHit,
|
||||
VacuumPageMiss,
|
||||
VacuumPageDirty);
|
||||
|
@ -454,7 +455,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
|
|||
BlockNumber next_not_all_visible_block;
|
||||
bool skipping_all_visible_blocks;
|
||||
xl_heap_freeze_tuple *frozen;
|
||||
StringInfoData buf;
|
||||
StringInfoData buf;
|
||||
|
||||
pg_rusage_init(&ru0);
|
||||
|
||||
|
@ -1784,7 +1785,7 @@ static bool
|
|||
heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid)
|
||||
{
|
||||
Page page = BufferGetPage(buf);
|
||||
BlockNumber blockno = BufferGetBlockNumber(buf);
|
||||
BlockNumber blockno = BufferGetBlockNumber(buf);
|
||||
OffsetNumber offnum,
|
||||
maxoff;
|
||||
bool all_visible = true;
|
||||
|
|
|
@ -405,10 +405,10 @@ ExecSupportsMarkRestore(Path *pathnode)
|
|||
* that does, we presently come here only for ResultPath nodes,
|
||||
* which represent Result plans without a child plan. So there is
|
||||
* nothing to recurse to and we can just say "false". (This means
|
||||
* that Result's support for mark/restore is in fact dead code.
|
||||
* We keep it since it's not much code, and someday the planner
|
||||
* might be smart enough to use it. That would require making
|
||||
* this function smarter too, of course.)
|
||||
* that Result's support for mark/restore is in fact dead code. We
|
||||
* keep it since it's not much code, and someday the planner might
|
||||
* be smart enough to use it. That would require making this
|
||||
* function smarter too, of course.)
|
||||
*/
|
||||
Assert(IsA(pathnode, ResultPath));
|
||||
return false;
|
|
@ -78,9 +78,9 @@
|
|||
* another in-progress tuple, it has two options:
|
||||
*
|
||||
* 1. back out the speculatively inserted tuple, then wait for the other
|
||||
* transaction, and retry. Or,
|
||||
* transaction, and retry. Or,
|
||||
* 2. wait for the other transaction, with the speculatively inserted tuple
|
||||
* still in place.
|
||||
* still in place.
|
||||
*
|
||||
* If two backends insert at the same time, and both try to wait for each
|
||||
* other, they will deadlock. So option 2 is not acceptable. Option 1
|
||||
|
@ -428,7 +428,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
|
|||
indexRelation, indexInfo,
|
||||
tupleid, values, isnull,
|
||||
estate, false,
|
||||
waitMode, violationOK, NULL);
|
||||
waitMode, violationOK, NULL);
|
||||
}
|
||||
|
||||
if ((checkUnique == UNIQUE_CHECK_PARTIAL ||
|
||||
|
@ -538,7 +538,7 @@ ExecCheckIndexConstraints(TupleTableSlot *slot,
|
|||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("ON CONFLICT does not support deferred unique constraints/exclusion constraints as arbiters"),
|
||||
errtableconstraint(heapRelation,
|
||||
RelationGetRelationName(indexRelation))));
|
||||
RelationGetRelationName(indexRelation))));
|
||||
|
||||
checkedIndex = true;
|
||||
|
||||
|
@ -578,7 +578,7 @@ ExecCheckIndexConstraints(TupleTableSlot *slot,
|
|||
satisfiesConstraint =
|
||||
check_exclusion_or_unique_constraint(heapRelation, indexRelation,
|
||||
indexInfo, &invalidItemPtr,
|
||||
values, isnull, estate, false,
|
||||
values, isnull, estate, false,
|
||||
CEOUC_WAIT, true,
|
||||
conflictTid);
|
||||
if (!satisfiesConstraint)
|
||||
|
@ -814,9 +814,9 @@ retry:
|
|||
errmsg("could not create exclusion constraint \"%s\"",
|
||||
RelationGetRelationName(index)),
|
||||
error_new && error_existing ?
|
||||
errdetail("Key %s conflicts with key %s.",
|
||||
error_new, error_existing) :
|
||||
errdetail("Key conflicts exist."),
|
||||
errdetail("Key %s conflicts with key %s.",
|
||||
error_new, error_existing) :
|
||||
errdetail("Key conflicts exist."),
|
||||
errtableconstraint(heap,
|
||||
RelationGetRelationName(index))));
|
||||
else
|
||||
|
@ -825,9 +825,9 @@ retry:
|
|||
errmsg("conflicting key value violates exclusion constraint \"%s\"",
|
||||
RelationGetRelationName(index)),
|
||||
error_new && error_existing ?
|
||||
errdetail("Key %s conflicts with existing key %s.",
|
||||
error_new, error_existing) :
|
||||
errdetail("Key conflicts with existing key."),
|
||||
errdetail("Key %s conflicts with existing key %s.",
|
||||
error_new, error_existing) :
|
||||
errdetail("Key conflicts with existing key."),
|
||||
errtableconstraint(heap,
|
||||
RelationGetRelationName(index))));
|
||||
}
|
||||
|
@ -838,8 +838,8 @@ retry:
|
|||
* Ordinarily, at this point the search should have found the originally
|
||||
* inserted tuple (if any), unless we exited the loop early because of
|
||||
* conflict. However, it is possible to define exclusion constraints for
|
||||
* which that wouldn't be true --- for instance, if the operator is <>.
|
||||
* So we no longer complain if found_self is still false.
|
||||
* which that wouldn't be true --- for instance, if the operator is <>. So
|
||||
* we no longer complain if found_self is still false.
|
||||
*/
|
||||
|
||||
econtext->ecxt_scantuple = save_scantuple;
|
|
@ -153,16 +153,16 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
|
|||
* If the transaction is read-only, we need to check if any writes are
|
||||
* planned to non-temporary tables. EXPLAIN is considered read-only.
|
||||
*
|
||||
* Don't allow writes in parallel mode. Supporting UPDATE and DELETE would
|
||||
* require (a) storing the combocid hash in shared memory, rather than
|
||||
* synchronizing it just once at the start of parallelism, and (b) an
|
||||
* Don't allow writes in parallel mode. Supporting UPDATE and DELETE
|
||||
* would require (a) storing the combocid hash in shared memory, rather
|
||||
* than synchronizing it just once at the start of parallelism, and (b) an
|
||||
* alternative to heap_update()'s reliance on xmax for mutual exclusion.
|
||||
* INSERT may have no such troubles, but we forbid it to simplify the
|
||||
* checks.
|
||||
*
|
||||
* We have lower-level defenses in CommandCounterIncrement and elsewhere
|
||||
* against performing unsafe operations in parallel mode, but this gives
|
||||
* a more user-friendly error message.
|
||||
* against performing unsafe operations in parallel mode, but this gives a
|
||||
* more user-friendly error message.
|
||||
*/
|
||||
if ((XactReadOnly || IsInParallelMode()) &&
|
||||
!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
|
||||
|
@ -670,14 +670,14 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
|
|||
*/
|
||||
if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
|
||||
userid,
|
||||
rte->insertedCols,
|
||||
ACL_INSERT))
|
||||
rte->insertedCols,
|
||||
ACL_INSERT))
|
||||
return false;
|
||||
|
||||
if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
|
||||
userid,
|
||||
rte->updatedCols,
|
||||
ACL_UPDATE))
|
||||
rte->updatedCols,
|
||||
ACL_UPDATE))
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
@ -695,10 +695,9 @@ ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
|
|||
int col = -1;
|
||||
|
||||
/*
|
||||
* When the query doesn't explicitly update any columns, allow the
|
||||
* query if we have permission on any column of the rel. This is
|
||||
* to handle SELECT FOR UPDATE as well as possible corner cases in
|
||||
* UPDATE.
|
||||
* When the query doesn't explicitly update any columns, allow the query
|
||||
* if we have permission on any column of the rel. This is to handle
|
||||
* SELECT FOR UPDATE as well as possible corner cases in UPDATE.
|
||||
*/
|
||||
if (bms_is_empty(modifiedCols))
|
||||
{
|
||||
|
@ -742,8 +741,8 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
|
|||
ListCell *l;
|
||||
|
||||
/*
|
||||
* Fail if write permissions are requested in parallel mode for
|
||||
* table (temp or non-temp), otherwise fail for any non-temp table.
|
||||
* Fail if write permissions are requested in parallel mode for table
|
||||
* (temp or non-temp), otherwise fail for any non-temp table.
|
||||
*/
|
||||
foreach(l, plannedstmt->rtable)
|
||||
{
|
||||
|
@ -1665,9 +1664,9 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
|
|||
Relation rel = resultRelInfo->ri_RelationDesc;
|
||||
TupleDesc tupdesc = RelationGetDescr(rel);
|
||||
TupleConstr *constr = tupdesc->constr;
|
||||
Bitmapset *modifiedCols;
|
||||
Bitmapset *insertedCols;
|
||||
Bitmapset *updatedCols;
|
||||
Bitmapset *modifiedCols;
|
||||
Bitmapset *insertedCols;
|
||||
Bitmapset *updatedCols;
|
||||
|
||||
Assert(constr);
|
||||
|
||||
|
@ -1722,7 +1721,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
|
|||
(errcode(ERRCODE_CHECK_VIOLATION),
|
||||
errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
|
||||
RelationGetRelationName(rel), failed),
|
||||
val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
|
||||
val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
|
||||
errtableconstraint(rel, failed)));
|
||||
}
|
||||
}
|
||||
|
@ -1773,11 +1772,11 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
|
|||
/*
|
||||
* WITH CHECK OPTION checks are intended to ensure that the new tuple
|
||||
* is visible (in the case of a view) or that it passes the
|
||||
* 'with-check' policy (in the case of row security).
|
||||
* If the qual evaluates to NULL or FALSE, then the new tuple won't be
|
||||
* included in the view or doesn't pass the 'with-check' policy for the
|
||||
* table. We need ExecQual to return FALSE for NULL to handle the view
|
||||
* case (the opposite of what we do above for CHECK constraints).
|
||||
* 'with-check' policy (in the case of row security). If the qual
|
||||
* evaluates to NULL or FALSE, then the new tuple won't be included in
|
||||
* the view or doesn't pass the 'with-check' policy for the table. We
|
||||
* need ExecQual to return FALSE for NULL to handle the view case (the
|
||||
* opposite of what we do above for CHECK constraints).
|
||||
*/
|
||||
if (!ExecQual((List *) wcoExpr, econtext, false))
|
||||
{
|
||||
|
@ -1788,14 +1787,15 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
|
|||
|
||||
switch (wco->kind)
|
||||
{
|
||||
/*
|
||||
* For WITH CHECK OPTIONs coming from views, we might be able to
|
||||
* provide the details on the row, depending on the permissions
|
||||
* on the relation (that is, if the user could view it directly
|
||||
* anyway). For RLS violations, we don't include the data since
|
||||
* we don't know if the user should be able to view the tuple as
|
||||
* as that depends on the USING policy.
|
||||
*/
|
||||
/*
|
||||
* For WITH CHECK OPTIONs coming from views, we might be
|
||||
* able to provide the details on the row, depending on
|
||||
* the permissions on the relation (that is, if the user
|
||||
* could view it directly anyway). For RLS violations, we
|
||||
* don't include the data since we don't know if the user
|
||||
* should be able to view the tuple as as that depends on
|
||||
* the USING policy.
|
||||
*/
|
||||
case WCO_VIEW_CHECK:
|
||||
insertedCols = GetInsertedColumns(resultRelInfo, estate);
|
||||
updatedCols = GetUpdatedColumns(resultRelInfo, estate);
|
||||
|
@ -1808,8 +1808,8 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
|
|||
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
|
||||
errmsg("new row violates WITH CHECK OPTION for \"%s\"",
|
||||
wco->relname),
|
||||
errmsg("new row violates WITH CHECK OPTION for \"%s\"",
|
||||
wco->relname),
|
||||
val_desc ? errdetail("Failing row contains %s.",
|
||||
val_desc) : 0));
|
||||
break;
|
||||
|
@ -1817,14 +1817,14 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
|
|||
case WCO_RLS_UPDATE_CHECK:
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
|
||||
errmsg("new row violates row level security policy for \"%s\"",
|
||||
wco->relname)));
|
||||
errmsg("new row violates row level security policy for \"%s\"",
|
||||
wco->relname)));
|
||||
break;
|
||||
case WCO_RLS_CONFLICT_CHECK:
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
|
||||
errmsg("new row violates row level security policy (USING expression) for \"%s\"",
|
||||
wco->relname)));
|
||||
errmsg("new row violates row level security policy (USING expression) for \"%s\"",
|
||||
wco->relname)));
|
||||
break;
|
||||
default:
|
||||
elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
|
||||
|
@ -1915,8 +1915,8 @@ ExecBuildSlotValueDescription(Oid reloid,
|
|||
{
|
||||
/*
|
||||
* No table-level SELECT, so need to make sure they either have
|
||||
* SELECT rights on the column or that they have provided the
|
||||
* data for the column. If not, omit this column from the error
|
||||
* SELECT rights on the column or that they have provided the data
|
||||
* for the column. If not, omit this column from the error
|
||||
* message.
|
||||
*/
|
||||
aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum,
|
||||
|
@ -2258,14 +2258,14 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
|
|||
break;
|
||||
case LockWaitSkip:
|
||||
if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
|
||||
return NULL; /* skip instead of waiting */
|
||||
return NULL; /* skip instead of waiting */
|
||||
break;
|
||||
case LockWaitError:
|
||||
if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
|
||||
errmsg("could not obtain lock on row in relation \"%s\"",
|
||||
RelationGetRelationName(relation))));
|
||||
RelationGetRelationName(relation))));
|
||||
break;
|
||||
}
|
||||
continue; /* loop back to repeat heap_fetch */
|
||||
|
@ -2313,9 +2313,9 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
|
|||
* doing so would require changing heap_update and
|
||||
* heap_delete to not complain about updating "invisible"
|
||||
* tuples, which seems pretty scary (heap_lock_tuple will
|
||||
* not complain, but few callers expect HeapTupleInvisible,
|
||||
* and we're not one of them). So for now, treat the tuple
|
||||
* as deleted and do not process.
|
||||
* not complain, but few callers expect
|
||||
* HeapTupleInvisible, and we're not one of them). So for
|
||||
* now, treat the tuple as deleted and do not process.
|
||||
*/
|
||||
ReleaseBuffer(buffer);
|
||||
return NULL;
|
||||
|
@ -2563,8 +2563,8 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate)
|
|||
if (fdwroutine->RefetchForeignRow == NULL)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("cannot lock rows in foreign table \"%s\"",
|
||||
RelationGetRelationName(erm->relation))));
|
||||
errmsg("cannot lock rows in foreign table \"%s\"",
|
||||
RelationGetRelationName(erm->relation))));
|
||||
copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
|
||||
erm,
|
||||
datum,
|
|
@ -182,8 +182,8 @@ static Datum ExecEvalArrayCoerceExpr(ArrayCoerceExprState *astate,
|
|||
static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
|
||||
bool *isNull, ExprDoneCond *isDone);
|
||||
static Datum ExecEvalGroupingFuncExpr(GroupingFuncExprState *gstate,
|
||||
ExprContext *econtext,
|
||||
bool *isNull, ExprDoneCond *isDone);
|
||||
ExprContext *econtext,
|
||||
bool *isNull, ExprDoneCond *isDone);
|
||||
|
||||
|
||||
/* ----------------------------------------------------------------
|
||||
|
@ -3034,10 +3034,10 @@ ExecEvalGroupingFuncExpr(GroupingFuncExprState *gstate,
|
|||
bool *isNull,
|
||||
ExprDoneCond *isDone)
|
||||
{
|
||||
int result = 0;
|
||||
int attnum = 0;
|
||||
Bitmapset *grouped_cols = gstate->aggstate->grouped_cols;
|
||||
ListCell *lc;
|
||||
int result = 0;
|
||||
int attnum = 0;
|
||||
Bitmapset *grouped_cols = gstate->aggstate->grouped_cols;
|
||||
ListCell *lc;
|
||||
|
||||
if (isDone)
|
||||
*isDone = ExprSingleResult;
|
||||
|
@ -4529,7 +4529,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
|
|||
GroupingFuncExprState *grp_state = makeNode(GroupingFuncExprState);
|
||||
Agg *agg = NULL;
|
||||
|
||||
if (!parent || !IsA(parent, AggState) || !IsA(parent->plan, Agg))
|
||||
if (!parent || !IsA(parent, AggState) ||!IsA(parent->plan, Agg))
|
||||
elog(ERROR, "parent of GROUPING is not Agg node");
|
||||
|
||||
grp_state->aggstate = (AggState *) parent;
|
|
@ -645,7 +645,7 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
|
|||
* overall targetlist's econtext. GroupingFunc arguments are never
|
||||
* evaluated at all.
|
||||
*/
|
||||
if (IsA(node, Aggref) || IsA(node, GroupingFunc))
|
||||
if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
|
||||
return false;
|
||||
if (IsA(node, WindowFunc))
|
||||
return false;
|
|
@ -337,11 +337,11 @@ typedef struct AggStatePerPhaseData
|
|||
{
|
||||
int numsets; /* number of grouping sets (or 0) */
|
||||
int *gset_lengths; /* lengths of grouping sets */
|
||||
Bitmapset **grouped_cols; /* column groupings for rollup */
|
||||
Bitmapset **grouped_cols; /* column groupings for rollup */
|
||||
FmgrInfo *eqfunctions; /* per-grouping-field equality fns */
|
||||
Agg *aggnode; /* Agg node for phase data */
|
||||
Sort *sortnode; /* Sort node for input ordering for phase */
|
||||
} AggStatePerPhaseData;
|
||||
} AggStatePerPhaseData;
|
||||
|
||||
/*
|
||||
* To implement hashed aggregation, we need a hashtable that stores a
|
||||
|
@ -380,12 +380,12 @@ static void finalize_aggregate(AggState *aggstate,
|
|||
AggStatePerGroup pergroupstate,
|
||||
Datum *resultVal, bool *resultIsNull);
|
||||
static void prepare_projection_slot(AggState *aggstate,
|
||||
TupleTableSlot *slot,
|
||||
int currentSet);
|
||||
TupleTableSlot *slot,
|
||||
int currentSet);
|
||||
static void finalize_aggregates(AggState *aggstate,
|
||||
AggStatePerAgg peragg,
|
||||
AggStatePerGroup pergroup,
|
||||
int currentSet);
|
||||
AggStatePerAgg peragg,
|
||||
AggStatePerGroup pergroup,
|
||||
int currentSet);
|
||||
static TupleTableSlot *project_aggregates(AggState *aggstate);
|
||||
static Bitmapset *find_unaggregated_cols(AggState *aggstate);
|
||||
static bool find_unaggregated_cols_walker(Node *node, Bitmapset **colnos);
|
||||
|
@ -441,12 +441,12 @@ initialize_phase(AggState *aggstate, int newphase)
|
|||
}
|
||||
|
||||
/*
|
||||
* If this isn't the last phase, we need to sort appropriately for the next
|
||||
* phase in sequence.
|
||||
* If this isn't the last phase, we need to sort appropriately for the
|
||||
* next phase in sequence.
|
||||
*/
|
||||
if (newphase < aggstate->numphases - 1)
|
||||
{
|
||||
Sort *sortnode = aggstate->phases[newphase+1].sortnode;
|
||||
Sort *sortnode = aggstate->phases[newphase + 1].sortnode;
|
||||
PlanState *outerNode = outerPlanState(aggstate);
|
||||
TupleDesc tupDesc = ExecGetResultType(outerNode);
|
||||
|
||||
|
@ -540,9 +540,8 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
|
|||
/*
|
||||
* (Re)set transValue to the initial value.
|
||||
*
|
||||
* Note that when the initial value is pass-by-ref, we must copy
|
||||
* it (into the aggcontext) since we will pfree the transValue
|
||||
* later.
|
||||
* Note that when the initial value is pass-by-ref, we must copy it (into
|
||||
* the aggcontext) since we will pfree the transValue later.
|
||||
*/
|
||||
if (peraggstate->initValueIsNull)
|
||||
pergroupstate->transValue = peraggstate->initValue;
|
||||
|
@ -551,7 +550,7 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
|
|||
MemoryContext oldContext;
|
||||
|
||||
oldContext = MemoryContextSwitchTo(
|
||||
aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
|
||||
aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
|
||||
pergroupstate->transValue = datumCopy(peraggstate->initValue,
|
||||
peraggstate->transtypeByVal,
|
||||
peraggstate->transtypeLen);
|
||||
|
@ -560,11 +559,11 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
|
|||
pergroupstate->transValueIsNull = peraggstate->initValueIsNull;
|
||||
|
||||
/*
|
||||
* If the initial value for the transition state doesn't exist in
|
||||
* the pg_aggregate table then we will let the first non-NULL
|
||||
* value returned from the outer procNode become the initial
|
||||
* value. (This is useful for aggregates like max() and min().)
|
||||
* The noTransValue flag signals that we still need to do this.
|
||||
* If the initial value for the transition state doesn't exist in the
|
||||
* pg_aggregate table then we will let the first non-NULL value returned
|
||||
* from the outer procNode become the initial value. (This is useful for
|
||||
* aggregates like max() and min().) The noTransValue flag signals that we
|
||||
* still need to do this.
|
||||
*/
|
||||
pergroupstate->noTransValue = peraggstate->initValueIsNull;
|
||||
}
|
||||
|
@ -586,8 +585,8 @@ initialize_aggregates(AggState *aggstate,
|
|||
int numReset)
|
||||
{
|
||||
int aggno;
|
||||
int numGroupingSets = Max(aggstate->phase->numsets, 1);
|
||||
int setno = 0;
|
||||
int numGroupingSets = Max(aggstate->phase->numsets, 1);
|
||||
int setno = 0;
|
||||
|
||||
if (numReset < 1)
|
||||
numReset = numGroupingSets;
|
||||
|
@ -655,7 +654,7 @@ advance_transition_function(AggState *aggstate,
|
|||
* do not need to pfree the old transValue, since it's NULL.
|
||||
*/
|
||||
oldContext = MemoryContextSwitchTo(
|
||||
aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
|
||||
aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
|
||||
pergroupstate->transValue = datumCopy(fcinfo->arg[1],
|
||||
peraggstate->transtypeByVal,
|
||||
peraggstate->transtypeLen);
|
||||
|
@ -730,9 +729,9 @@ static void
|
|||
advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
|
||||
{
|
||||
int aggno;
|
||||
int setno = 0;
|
||||
int numGroupingSets = Max(aggstate->phase->numsets, 1);
|
||||
int numAggs = aggstate->numaggs;
|
||||
int setno = 0;
|
||||
int numGroupingSets = Max(aggstate->phase->numsets, 1);
|
||||
int numAggs = aggstate->numaggs;
|
||||
|
||||
for (aggno = 0; aggno < numAggs; aggno++)
|
||||
{
|
||||
|
@ -1134,7 +1133,7 @@ prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet
|
|||
{
|
||||
if (aggstate->phase->grouped_cols)
|
||||
{
|
||||
Bitmapset *grouped_cols = aggstate->phase->grouped_cols[currentSet];
|
||||
Bitmapset *grouped_cols = aggstate->phase->grouped_cols[currentSet];
|
||||
|
||||
aggstate->grouped_cols = grouped_cols;
|
||||
|
||||
|
@ -1156,7 +1155,7 @@ prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet
|
|||
|
||||
foreach(lc, aggstate->all_grouped_cols)
|
||||
{
|
||||
int attnum = lfirst_int(lc);
|
||||
int attnum = lfirst_int(lc);
|
||||
|
||||
if (!bms_is_member(attnum, grouped_cols))
|
||||
slot->tts_isnull[attnum - 1] = true;
|
||||
|
@ -1225,8 +1224,7 @@ project_aggregates(AggState *aggstate)
|
|||
ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
|
||||
|
||||
/*
|
||||
* Check the qual (HAVING clause); if the group does not match, ignore
|
||||
* it.
|
||||
* Check the qual (HAVING clause); if the group does not match, ignore it.
|
||||
*/
|
||||
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
|
||||
{
|
||||
|
@ -1286,7 +1284,7 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
|
|||
*colnos = bms_add_member(*colnos, var->varattno);
|
||||
return false;
|
||||
}
|
||||
if (IsA(node, Aggref) || IsA(node, GroupingFunc))
|
||||
if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
|
||||
{
|
||||
/* do not descend into aggregate exprs */
|
||||
return false;
|
||||
|
@ -1319,7 +1317,7 @@ build_hash_table(AggState *aggstate)
|
|||
aggstate->hashfunctions,
|
||||
node->numGroups,
|
||||
entrysize,
|
||||
aggstate->aggcontexts[0]->ecxt_per_tuple_memory,
|
||||
aggstate->aggcontexts[0]->ecxt_per_tuple_memory,
|
||||
tmpmem);
|
||||
}
|
||||
|
||||
|
@ -1521,8 +1519,8 @@ agg_retrieve_direct(AggState *aggstate)
|
|||
/*
|
||||
* get state info from node
|
||||
*
|
||||
* econtext is the per-output-tuple expression context
|
||||
* tmpcontext is the per-input-tuple expression context
|
||||
* econtext is the per-output-tuple expression context tmpcontext is the
|
||||
* per-input-tuple expression context
|
||||
*/
|
||||
econtext = aggstate->ss.ps.ps_ExprContext;
|
||||
tmpcontext = aggstate->tmpcontext;
|
||||
|
@ -1615,17 +1613,17 @@ agg_retrieve_direct(AggState *aggstate)
|
|||
* If a subgroup for the current grouping set is present, project it.
|
||||
*
|
||||
* We have a new group if:
|
||||
* - we're out of input but haven't projected all grouping sets
|
||||
* (checked above)
|
||||
* - we're out of input but haven't projected all grouping sets
|
||||
* (checked above)
|
||||
* OR
|
||||
* - we already projected a row that wasn't from the last grouping
|
||||
* set
|
||||
* AND
|
||||
* - the next grouping set has at least one grouping column (since
|
||||
* empty grouping sets project only once input is exhausted)
|
||||
* AND
|
||||
* - the previous and pending rows differ on the grouping columns
|
||||
* of the next grouping set
|
||||
* - we already projected a row that wasn't from the last grouping
|
||||
* set
|
||||
* AND
|
||||
* - the next grouping set has at least one grouping column (since
|
||||
* empty grouping sets project only once input is exhausted)
|
||||
* AND
|
||||
* - the previous and pending rows differ on the grouping columns
|
||||
* of the next grouping set
|
||||
*/
|
||||
if (aggstate->input_done ||
|
||||
(node->aggstrategy == AGG_SORTED &&
|
||||
|
@ -1729,7 +1727,8 @@ agg_retrieve_direct(AggState *aggstate)
|
|||
firstSlot,
|
||||
InvalidBuffer,
|
||||
true);
|
||||
aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
|
||||
aggstate->grp_firstTuple = NULL; /* don't keep two
|
||||
* pointers */
|
||||
|
||||
/* set up for first advance_aggregates call */
|
||||
tmpcontext->ecxt_outertuple = firstSlot;
|
||||
|
@ -1774,7 +1773,7 @@ agg_retrieve_direct(AggState *aggstate)
|
|||
node->numCols,
|
||||
node->grpColIdx,
|
||||
aggstate->phase->eqfunctions,
|
||||
tmpcontext->ecxt_per_tuple_memory))
|
||||
tmpcontext->ecxt_per_tuple_memory))
|
||||
{
|
||||
aggstate->grp_firstTuple = ExecCopySlotTuple(outerslot);
|
||||
break;
|
||||
|
@ -1787,8 +1786,8 @@ agg_retrieve_direct(AggState *aggstate)
|
|||
* Use the representative input tuple for any references to
|
||||
* non-aggregated input columns in aggregate direct args, the node
|
||||
* qual, and the tlist. (If we are not grouping, and there are no
|
||||
* input rows at all, we will come here with an empty firstSlot ...
|
||||
* but if not grouping, there can't be any references to
|
||||
* input rows at all, we will come here with an empty firstSlot
|
||||
* ... but if not grouping, there can't be any references to
|
||||
* non-aggregated input columns, so no problem.)
|
||||
*/
|
||||
econtext->ecxt_outertuple = firstSlot;
|
||||
|
@ -1803,8 +1802,8 @@ agg_retrieve_direct(AggState *aggstate)
|
|||
finalize_aggregates(aggstate, peragg, pergroup, currentSet);
|
||||
|
||||
/*
|
||||
* If there's no row to project right now, we must continue rather than
|
||||
* returning a null since there might be more groups.
|
||||
* If there's no row to project right now, we must continue rather
|
||||
* than returning a null since there might be more groups.
|
||||
*/
|
||||
result = project_aggregates(aggstate);
|
||||
if (result)
|
||||
|
@ -1996,7 +1995,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
|
|||
|
||||
foreach(l, node->chain)
|
||||
{
|
||||
Agg *agg = lfirst(l);
|
||||
Agg *agg = lfirst(l);
|
||||
|
||||
numGroupingSets = Max(numGroupingSets,
|
||||
list_length(agg->groupingSets));
|
||||
|
@ -2074,7 +2073,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
|
|||
ExecAssignScanTypeFromOuterPlan(&aggstate->ss);
|
||||
if (node->chain)
|
||||
ExecSetSlotDescriptor(aggstate->sort_slot,
|
||||
aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor);
|
||||
aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor);
|
||||
|
||||
/*
|
||||
* Initialize result tuple type and projection info.
|
||||
|
@ -2111,13 +2110,13 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
|
|||
for (phase = 0; phase < numPhases; ++phase)
|
||||
{
|
||||
AggStatePerPhase phasedata = &aggstate->phases[phase];
|
||||
Agg *aggnode;
|
||||
Sort *sortnode;
|
||||
int num_sets;
|
||||
Agg *aggnode;
|
||||
Sort *sortnode;
|
||||
int num_sets;
|
||||
|
||||
if (phase > 0)
|
||||
{
|
||||
aggnode = list_nth(node->chain, phase-1);
|
||||
aggnode = list_nth(node->chain, phase - 1);
|
||||
sortnode = (Sort *) aggnode->plan.lefttree;
|
||||
Assert(IsA(sortnode, Sort));
|
||||
}
|
||||
|
@ -2137,8 +2136,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
|
|||
i = 0;
|
||||
foreach(l, aggnode->groupingSets)
|
||||
{
|
||||
int current_length = list_length(lfirst(l));
|
||||
Bitmapset *cols = NULL;
|
||||
int current_length = list_length(lfirst(l));
|
||||
Bitmapset *cols = NULL;
|
||||
|
||||
/* planner forces this to be correct */
|
||||
for (j = 0; j < current_length; ++j)
|
||||
|
@ -2288,8 +2287,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
|
|||
/* Begin filling in the peraggstate data */
|
||||
peraggstate->aggrefstate = aggrefstate;
|
||||
peraggstate->aggref = aggref;
|
||||
peraggstate->sortstates =(Tuplesortstate**)
|
||||
palloc0(sizeof(Tuplesortstate*) * numGroupingSets);
|
||||
peraggstate->sortstates = (Tuplesortstate **)
|
||||
palloc0(sizeof(Tuplesortstate *) * numGroupingSets);
|
||||
|
||||
for (currentsortno = 0; currentsortno < numGroupingSets; currentsortno++)
|
||||
peraggstate->sortstates[currentsortno] = NULL;
|
||||
|
@ -2643,11 +2642,11 @@ void
|
|||
ExecReScanAgg(AggState *node)
|
||||
{
|
||||
ExprContext *econtext = node->ss.ps.ps_ExprContext;
|
||||
PlanState *outerPlan = outerPlanState(node);
|
||||
PlanState *outerPlan = outerPlanState(node);
|
||||
Agg *aggnode = (Agg *) node->ss.ps.plan;
|
||||
int aggno;
|
||||
int numGroupingSets = Max(node->maxsets, 1);
|
||||
int setno;
|
||||
int numGroupingSets = Max(node->maxsets, 1);
|
||||
int setno;
|
||||
|
||||
node->agg_done = false;
|
||||
|
||||
|
@ -2732,7 +2731,7 @@ ExecReScanAgg(AggState *node)
|
|||
* Reset the per-group state (in particular, mark transvalues null)
|
||||
*/
|
||||
MemSet(node->pergroup, 0,
|
||||
sizeof(AggStatePerGroupData) * node->numaggs * numGroupingSets);
|
||||
sizeof(AggStatePerGroupData) * node->numaggs * numGroupingSets);
|
||||
|
||||
/* reset to phase 0 */
|
||||
initialize_phase(node, 0);
|
||||
|
@ -2775,8 +2774,9 @@ AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext)
|
|||
{
|
||||
if (aggcontext)
|
||||
{
|
||||
AggState *aggstate = ((AggState *) fcinfo->context);
|
||||
ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
|
||||
AggState *aggstate = ((AggState *) fcinfo->context);
|
||||
ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
|
||||
|
||||
*aggcontext = cxt->ecxt_per_tuple_memory;
|
||||
}
|
||||
return AGG_CONTEXT_AGGREGATE;
|
||||
|
@ -2862,7 +2862,7 @@ AggRegisterCallback(FunctionCallInfo fcinfo,
|
|||
if (fcinfo->context && IsA(fcinfo->context, AggState))
|
||||
{
|
||||
AggState *aggstate = (AggState *) fcinfo->context;
|
||||
ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
|
||||
ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
|
||||
|
||||
RegisterExprContextCallback(cxt, func, arg);
|
||||
|
|
@ -449,7 +449,7 @@ ExecBitmapHeapScan(BitmapHeapScanState *node)
|
|||
void
|
||||
ExecReScanBitmapHeapScan(BitmapHeapScanState *node)
|
||||
{
|
||||
PlanState *outerPlan = outerPlanState(node);
|
||||
PlanState *outerPlan = outerPlanState(node);
|
||||
|
||||
/* rescan to release any page pin */
|
||||
heap_rescan(node->ss.ss_currentScanDesc, NULL);
|
|
@ -280,7 +280,7 @@ ExecEndGroup(GroupState *node)
|
|||
void
|
||||
ExecReScanGroup(GroupState *node)
|
||||
{
|
||||
PlanState *outerPlan = outerPlanState(node);
|
||||
PlanState *outerPlan = outerPlanState(node);
|
||||
|
||||
node->grp_done = FALSE;
|
||||
node->ss.ps.ps_TupFromTlist = false;
|
|
@ -500,8 +500,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
|
|||
bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
|
||||
|
||||
/*
|
||||
* If there's not enough space to store the projected number of tuples
|
||||
* and the required bucket headers, we will need multiple batches.
|
||||
* If there's not enough space to store the projected number of tuples and
|
||||
* the required bucket headers, we will need multiple batches.
|
||||
*/
|
||||
if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
|
||||
{
|
||||
|
@ -512,8 +512,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
|
|||
long bucket_size;
|
||||
|
||||
/*
|
||||
* Estimate the number of buckets we'll want to have when work_mem
|
||||
* is entirely full. Each bucket will contain a bucket pointer plus
|
||||
* Estimate the number of buckets we'll want to have when work_mem is
|
||||
* entirely full. Each bucket will contain a bucket pointer plus
|
||||
* NTUP_PER_BUCKET tuples, whose projected size already includes
|
||||
* overhead for the hash code, pointer to the next tuple, etc.
|
||||
*/
|
||||
|
@ -527,9 +527,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
|
|||
* Buckets are simple pointers to hashjoin tuples, while tupsize
|
||||
* includes the pointer, hash code, and MinimalTupleData. So buckets
|
||||
* should never really exceed 25% of work_mem (even for
|
||||
* NTUP_PER_BUCKET=1); except maybe * for work_mem values that are
|
||||
* not 2^N bytes, where we might get more * because of doubling.
|
||||
* So let's look for 50% here.
|
||||
* NTUP_PER_BUCKET=1); except maybe * for work_mem values that are not
|
||||
* 2^N bytes, where we might get more * because of doubling. So let's
|
||||
* look for 50% here.
|
||||
*/
|
||||
Assert(bucket_bytes <= hash_table_bytes / 2);
|
||||
|
||||
|
@ -655,7 +655,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;

hashtable->buckets = repalloc(hashtable->buckets,
sizeof(HashJoinTuple) * hashtable->nbuckets);
sizeof(HashJoinTuple) * hashtable->nbuckets);
}

/*

@ -671,6 +671,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
while (oldchunks != NULL)
{
HashMemoryChunk nextchunk = oldchunks->next;

/* position within the buffer (up to oldchunks->used) */
size_t idx = 0;

@ -691,7 +692,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
{
/* keep tuple in memory - copy it into the new chunk */
HashJoinTuple copyTuple =
(HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
(HashJoinTuple) dense_alloc(hashtable, hashTupleSize);

memcpy(copyTuple, hashTuple, hashTupleSize);

/* and add it back to the appropriate bucket */

@ -749,15 +751,15 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
static void
ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
{
HashMemoryChunk chunk;
HashMemoryChunk chunk;

/* do nothing if not an increase (it's called increase for a reason) */
if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
return;

/*
* We already know the optimal number of buckets, so let's just
* compute the log2_nbuckets for it.
* We already know the optimal number of buckets, so let's just compute
* the log2_nbuckets for it.
*/
hashtable->nbuckets = hashtable->nbuckets_optimal;
hashtable->log2_nbuckets = my_log2(hashtable->nbuckets_optimal);

@ -771,14 +773,14 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
#endif

/*
* Just reallocate the proper number of buckets - we don't need to
* walk through them - we can walk the dense-allocated chunks
* (just like in ExecHashIncreaseNumBatches, but without all the
* copying into new chunks)
* Just reallocate the proper number of buckets - we don't need to walk
* through them - we can walk the dense-allocated chunks (just like in
* ExecHashIncreaseNumBatches, but without all the copying into new
* chunks)
*/
hashtable->buckets =
(HashJoinTuple *) repalloc(hashtable->buckets,
hashtable->nbuckets * sizeof(HashJoinTuple));
hashtable->nbuckets * sizeof(HashJoinTuple));

memset(hashtable->buckets, 0, sizeof(void *) * hashtable->nbuckets);

@ -786,12 +788,13 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next)
{
/* process all tuples stored in this chunk */
size_t idx = 0;
size_t idx = 0;

while (idx < chunk->used)
{
HashJoinTuple hashTuple = (HashJoinTuple) (chunk->data + idx);
int bucketno;
int batchno;
int bucketno;
int batchno;

ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
&bucketno, &batchno);

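The reason the code above can resize the bucket array without touching the tuples is that ExecHashIncreaseNumBuckets only has to walk the dense-allocated chunks and relink each tuple into the new, zeroed bucket array. A rough, self-contained sketch of that walk follows; the struct layout, the modulo bucket mapping, and the per-entry len field are simplifications standing in for the real HashMemoryChunkData/HashJoinTuple layout (PostgreSQL masks the hash value with nbuckets - 1 and derives each entry's size from the stored tuple).

#include <string.h>

typedef struct SketchTuple
{
	struct SketchTuple *next;	/* bucket chain link */
	unsigned int hashvalue;
	size_t		len;			/* bytes this entry occupies in its chunk (assumed field) */
} SketchTuple;

typedef struct SketchChunk
{
	struct SketchChunk *next;
	size_t		used;			/* bytes handed out from data[] so far */
	char		data[];			/* densely packed SketchTuple entries */
} SketchChunk;

/* relink every tuple found in the chunk list into a freshly zeroed bucket array */
void
rebuild_buckets(SketchChunk *chunks, SketchTuple **buckets, unsigned int nbuckets)
{
	memset(buckets, 0, sizeof(SketchTuple *) * nbuckets);

	for (SketchChunk *chunk = chunks; chunk != NULL; chunk = chunk->next)
	{
		size_t		idx = 0;

		while (idx < chunk->used)
		{
			SketchTuple *tup = (SketchTuple *) (chunk->data + idx);
			unsigned int bucketno = tup->hashvalue % nbuckets;

			tup->next = buckets[bucketno];
			buckets[bucketno] = tup;
			idx += tup->len;
		}
	}
}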
@ -869,10 +872,11 @@ ExecHashTableInsert(HashJoinTable hashtable,

/*
* Increase the (optimal) number of buckets if we just exceeded the
* NTUP_PER_BUCKET threshold, but only when there's still a single batch.
* NTUP_PER_BUCKET threshold, but only when there's still a single
* batch.
*/
if ((hashtable->nbatch == 1) &&
(hashtable->nbuckets_optimal <= INT_MAX/2) && /* overflow protection */
(hashtable->nbuckets_optimal <= INT_MAX / 2) && /* overflow protection */
(ntuples >= (hashtable->nbuckets_optimal * NTUP_PER_BUCKET)))
{
hashtable->nbuckets_optimal *= 2;

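To make the growth rule in this hunk concrete: once the (single-batch) table holds more than NTUP_PER_BUCKET tuples per optimal bucket, the optimal bucket count doubles, and the INT_MAX / 2 guard keeps that doubling from overflowing an int. A minimal restatement is below, with the hash-table struct reduced to the few fields involved, which is an assumed simplification of the real HashJoinTableData.

#include <limits.h>

#define NTUP_PER_BUCKET 1		/* 1 in the 9.5 sources */

typedef struct SketchHashTable
{
	int			nbatch;
	int			nbuckets_optimal;
	int			log2_nbuckets_optimal;
	double		totalTuples;	/* tuples inserted so far (skew tuples ignored here) */
} SketchHashTable;

void
maybe_grow_buckets(SketchHashTable *ht)
{
	double		ntuples = ht->totalTuples;

	if (ht->nbatch == 1 &&
		ht->nbuckets_optimal <= INT_MAX / 2 &&	/* overflow protection */
		ntuples >= (double) ht->nbuckets_optimal * NTUP_PER_BUCKET)
	{
		ht->nbuckets_optimal *= 2;
		ht->log2_nbuckets_optimal += 1;
	}
}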
@ -1636,7 +1640,7 @@ dense_alloc(HashJoinTable hashtable, Size size)
{
/* allocate new chunk and put it at the beginning of the list */
newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
offsetof(HashMemoryChunkData, data) + size);
offsetof(HashMemoryChunkData, data) + size);
newChunk->maxlen = size;
newChunk->used = 0;
newChunk->ntuples = 0;

@ -1663,15 +1667,15 @@ dense_alloc(HashJoinTable hashtable, Size size)
}

/*
* See if we have enough space for it in the current chunk (if any).
* If not, allocate a fresh chunk.
* See if we have enough space for it in the current chunk (if any). If
* not, allocate a fresh chunk.
*/
if ((hashtable->chunks == NULL) ||
(hashtable->chunks->maxlen - hashtable->chunks->used) < size)
{
/* allocate new chunk and put it at the beginning of the list */
newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);
offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);

newChunk->maxlen = HASH_CHUNK_SIZE;
newChunk->used = size;

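The dense_alloc() hunks above show its allocation paths: carve the request out of the current chunk when it fits, otherwise push a fresh HASH_CHUNK_SIZE chunk onto the head of the list. A toy version of that bump-pointer carve is sketched below; malloc stands in for MemoryContextAlloc, the chunk size is assumed to match the server's 32kB constant, and the separate path for oversized requests (the first hunk) is omitted for brevity.

#include <stdlib.h>
#include <stddef.h>

#define SKETCH_CHUNK_SIZE (32 * 1024)	/* assumed to match HASH_CHUNK_SIZE */

typedef struct ChunkSketch
{
	struct ChunkSketch *next;
	size_t		maxlen;
	size_t		used;
	char		data[];
} ChunkSketch;

/* hand out 'size' bytes from the newest chunk, starting a new chunk if needed */
void *
dense_alloc_sketch(ChunkSketch **chunks, size_t size)
{
	ChunkSketch *cur = *chunks;
	char	   *ptr;

	if (cur == NULL || cur->maxlen - cur->used < size)
	{
		/* allocate new chunk and put it at the beginning of the list */
		ChunkSketch *newChunk = malloc(offsetof(ChunkSketch, data) + SKETCH_CHUNK_SIZE);

		if (newChunk == NULL)
			return NULL;
		newChunk->maxlen = SKETCH_CHUNK_SIZE;
		newChunk->used = 0;
		newChunk->next = *chunks;
		*chunks = newChunk;
		cur = newChunk;
	}

	ptr = cur->data + cur->used;
	cur->used += size;
	return ptr;
}

Allocating tuples densely this way is what makes the chunk walks in ExecHashIncreaseNumBatches and ExecHashIncreaseNumBuckets possible in the first place: the tuples live packed in a short list of chunks rather than in thousands of individual palloc'd blocks.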
@ -106,8 +106,8 @@ IndexOnlyNext(IndexOnlyScanState *node)
* away, because the tuple is still visible until the deleting
* transaction commits or the statement ends (if it's our
* transaction). In either case, the lock on the VM buffer will have
* been released (acting as a write barrier) after clearing the
* bit. And for us to have a snapshot that includes the deleting
* been released (acting as a write barrier) after clearing the bit.
* And for us to have a snapshot that includes the deleting
* transaction (making the tuple invisible), we must have acquired
* ProcArrayLock after that time, acting as a read barrier.
*

@ -288,9 +288,9 @@ next_indextuple:
* Can we return this tuple immediately, or does it need to be pushed
* to the reorder queue? If the ORDER BY expression values returned
* by the index were inaccurate, we can't return it yet, because the
* next tuple from the index might need to come before this one.
* Also, we can't return it yet if there are any smaller tuples in the
* queue already.
* next tuple from the index might need to come before this one. Also,
* we can't return it yet if there are any smaller tuples in the queue
* already.
*/
if (!was_exact || (topmost && cmp_orderbyvals(lastfetched_vals,
lastfetched_nulls,

@ -196,11 +196,12 @@ lnext:
* case, so as to avoid the "Halloween problem" of repeated
* update attempts. In the latter case it might be sensible
* to fetch the updated tuple instead, but doing so would
* require changing heap_update and heap_delete to not complain
* about updating "invisible" tuples, which seems pretty scary
* (heap_lock_tuple will not complain, but few callers expect
* HeapTupleInvisible, and we're not one of them). So for now,
* treat the tuple as deleted and do not process.
* require changing heap_update and heap_delete to not
* complain about updating "invisible" tuples, which seems
* pretty scary (heap_lock_tuple will not complain, but few
* callers expect HeapTupleInvisible, and we're not one of
* them). So for now, treat the tuple as deleted and do not
* process.
*/
goto lnext;

@ -317,7 +317,7 @@ ExecMaterialRestrPos(MaterialState *node)
void
ExecReScanMaterial(MaterialState *node)
{
PlanState *outerPlan = outerPlanState(node);
PlanState *outerPlan = outerPlanState(node);

ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);

@ -139,10 +139,10 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)

/*
* It isn't feasible to perform abbreviated key conversion, since
* tuples are pulled into mergestate's binary heap as needed. It would
* likely be counter-productive to convert tuples into an abbreviated
* representation as they're pulled up, so opt out of that additional
* optimization entirely.
* tuples are pulled into mergestate's binary heap as needed. It
* would likely be counter-productive to convert tuples into an
* abbreviated representation as they're pulled up, so opt out of that
* additional optimization entirely.
*/
sortKey->abbreviate = false;

Some files were not shown because too many files have changed in this diff