Ye-old pgindent run. Same 4-space tabs.

parent db4518729d
commit 52f77df613

434 changed files with 24799 additions and 21246 deletions
@@ -144,7 +144,7 @@ array_texteq(ArrayType *array, char *value)
{
    return array_iterator((Oid) 25,  /* text */
        (Oid) 67,  /* texteq */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -153,7 +153,7 @@ array_all_texteq(ArrayType *array, char *value)
{
    return array_iterator((Oid) 25,  /* text */
        (Oid) 67,  /* texteq */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -162,7 +162,7 @@ array_textregexeq(ArrayType *array, char *value)
{
    return array_iterator((Oid) 25,  /* text */
        (Oid) 1254,  /* textregexeq */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -171,7 +171,7 @@ array_all_textregexeq(ArrayType *array, char *value)
{
    return array_iterator((Oid) 25,  /* text */
        (Oid) 1254,  /* textregexeq */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -185,7 +185,7 @@ array_varchareq(ArrayType *array, char *value)
{
    return array_iterator((Oid) 1043,  /* varchar */
        (Oid) 1070,  /* varchareq */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -194,7 +194,7 @@ array_all_varchareq(ArrayType *array, char *value)
{
    return array_iterator((Oid) 1043,  /* varchar */
        (Oid) 1070,  /* varchareq */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -203,7 +203,7 @@ array_varcharregexeq(ArrayType *array, char *value)
{
    return array_iterator((Oid) 1043,  /* varchar */
        (Oid) 1254,  /* textregexeq */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -212,7 +212,7 @@ array_all_varcharregexeq(ArrayType *array, char *value)
{
    return array_iterator((Oid) 1043,  /* varchar */
        (Oid) 1254,  /* textregexeq */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -226,7 +226,7 @@ array_bpchareq(ArrayType *array, char *value)
{
    return array_iterator((Oid) 1042,  /* bpchar */
        (Oid) 1048,  /* bpchareq */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -235,7 +235,7 @@ array_all_bpchareq(ArrayType *array, char *value)
{
    return array_iterator((Oid) 1042,  /* bpchar */
        (Oid) 1048,  /* bpchareq */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -244,7 +244,7 @@ array_bpcharregexeq(ArrayType *array, char *value)
{
    return array_iterator((Oid) 1042,  /* bpchar */
        (Oid) 1254,  /* textregexeq */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -253,7 +253,7 @@ array_all_bpcharregexeq(ArrayType *array, char *value)
{
    return array_iterator((Oid) 1042,  /* bpchar */
        (Oid) 1254,  /* textregexeq */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -266,7 +266,7 @@ array_int4eq(ArrayType *array, int4 value)
{
    return array_iterator((Oid) 23,  /* int4 */
        (Oid) 65,  /* int4eq */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -275,7 +275,7 @@ array_all_int4eq(ArrayType *array, int4 value)
{
    return array_iterator((Oid) 23,  /* int4 */
        (Oid) 65,  /* int4eq */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -284,7 +284,7 @@ array_int4ne(ArrayType *array, int4 value)
{
    return array_iterator((Oid) 23,  /* int4 */
        (Oid) 144,  /* int4ne */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -293,7 +293,7 @@ array_all_int4ne(ArrayType *array, int4 value)
{
    return array_iterator((Oid) 23,  /* int4 */
        (Oid) 144,  /* int4ne */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -302,7 +302,7 @@ array_int4gt(ArrayType *array, int4 value)
{
    return array_iterator((Oid) 23,  /* int4 */
        (Oid) 147,  /* int4gt */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -311,7 +311,7 @@ array_all_int4gt(ArrayType *array, int4 value)
{
    return array_iterator((Oid) 23,  /* int4 */
        (Oid) 147,  /* int4gt */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -320,7 +320,7 @@ array_int4ge(ArrayType *array, int4 value)
{
    return array_iterator((Oid) 23,  /* int4 */
        (Oid) 150,  /* int4ge */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -329,7 +329,7 @@ array_all_int4ge(ArrayType *array, int4 value)
{
    return array_iterator((Oid) 23,  /* int4 */
        (Oid) 150,  /* int4ge */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -338,7 +338,7 @@ array_int4lt(ArrayType *array, int4 value)
{
    return array_iterator((Oid) 23,  /* int4 */
        (Oid) 66,  /* int4lt */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -347,7 +347,7 @@ array_all_int4lt(ArrayType *array, int4 value)
{
    return array_iterator((Oid) 23,  /* int4 */
        (Oid) 66,  /* int4lt */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -356,7 +356,7 @@ array_int4le(ArrayType *array, int4 value)
{
    return array_iterator((Oid) 23,  /* int4 */
        (Oid) 149,  /* int4le */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -365,7 +365,7 @@ array_all_int4le(ArrayType *array, int4 value)
{
    return array_iterator((Oid) 23,  /* int4 */
        (Oid) 149,  /* int4le */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -376,7 +376,7 @@ array_oideq(ArrayType *array, Oid value)
{
    return array_iterator((Oid) 26,  /* oid */
        (Oid) 184,  /* oideq */
        0,  /* logical or */
        0,  /* logical or */
        array, (Datum) value);
}

@@ -385,7 +385,7 @@ array_all_oidne(ArrayType *array, Oid value)
{
    return array_iterator((Oid) 26,  /* int4 */
        (Oid) 185,  /* oidne */
        1,  /* logical and */
        1,  /* logical and */
        array, (Datum) value);
}

@@ -393,8 +393,8 @@ array_all_oidne(ArrayType *array, Oid value)

/*
 * Local Variables:
 * tab-width: 4
 * c-indent-level: 4
 * c-basic-offset: 4
 * tab-width: 4
 * c-indent-level: 4
 * c-basic-offset: 4
 * End:
 */

@@ -2,7 +2,7 @@
#define ARRAY_ITERATOR_H

static int32 array_iterator(Oid elemtype, Oid proc, int and,
        ArrayType *array, Datum value);
        ArrayType *array, Datum value);

int32 array_texteq(ArrayType *array, char *value);
int32 array_all_texteq(ArrayType *array, char *value);

@@ -32,14 +32,15 @@ int32 array_all_int4lt(ArrayType *array, int4 value);
int32 array_int4le(ArrayType *array, int4 value);
int32 array_all_int4le(ArrayType *array, int4 value);

int32 array_oideq(ArrayType *array, Oid value);
int32 array_all_oidne(ArrayType *array, Oid value);
int32 array_oideq(ArrayType *array, Oid value);
int32 array_all_oidne(ArrayType *array, Oid value);

#endif

/*
 * Local Variables:
 * tab-width: 4
 * c-indent-level: 4
 * c-basic-offset: 4
 * tab-width: 4
 * c-indent-level: 4
 * c-basic-offset: 4
 * End:
 */

1253	contrib/bit/varbit.c
File diff suppressed because it is too large.
@ -2,7 +2,7 @@
|
|||
#include <ctype.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include <float.h> /* faked on sunos4 */
|
||||
#include <float.h> /* faked on sunos4 */
|
||||
|
||||
#include <math.h>
|
||||
|
||||
|
@ -10,7 +10,7 @@
|
|||
#ifdef HAVE_LIMITS_H
|
||||
#include <limits.h>
|
||||
#ifndef MAXINT
|
||||
#define MAXINT INT_MAX
|
||||
#define MAXINT INT_MAX
|
||||
#endif
|
||||
#else
|
||||
#ifdef HAVE_VALUES_H
|
||||
|
@ -22,7 +22,7 @@
|
|||
#include "utils/builtins.h"
|
||||
|
||||
|
||||
#define HEXDIG(z) (z)<10 ? ((z)+'0') : ((z)-10+'A')
|
||||
#define HEXDIG(z) (z)<10 ? ((z)+'0') : ((z)-10+'A')
|
||||
|
||||
/* Modeled on struct varlena from postgres.h, bu data type is bits8 */
|
||||
struct varbita
|
||||
|
@ -49,25 +49,25 @@ struct varbita
|
|||
#define VARBITEND(PTR) ((bits8 *) (PTR + VARSIZE(PTR)))
|
||||
/* Mask that will cover exactly one byte, i.e. BITSPERBYTE bits */
|
||||
#define BITMASK 0xFF
|
||||
#define BITHIGH 0x80
|
||||
#define BITHIGH 0x80
|
||||
|
||||
|
||||
bits8 * zpbitin(char *s, int dummy, int32 atttypmod);
|
||||
char * zpbitout(bits8 *s);
|
||||
char * zpbitsout(bits8 *s);
|
||||
bits8 * varbitin(char *s, int dummy, int32 atttypmod);
|
||||
bool biteq (bits8 *arg1, bits8 *arg2);
|
||||
bool bitne (bits8 *arg1, bits8 *arg2);
|
||||
bool bitge (bits8 *arg1, bits8 *arg2);
|
||||
bool bitgt (bits8 *arg1, bits8 *arg2);
|
||||
bool bitle (bits8 *arg1, bits8 *arg2);
|
||||
bool bitlt (bits8 *arg1, bits8 *arg2);
|
||||
int bitcmp (bits8 *arg1, bits8 *arg2);
|
||||
bits8 * bitand (bits8 * arg1, bits8 * arg2);
|
||||
bits8 * bitor (bits8 * arg1, bits8 * arg2);
|
||||
bits8 * bitxor (bits8 * arg1, bits8 * arg2);
|
||||
bits8 * bitnot (bits8 * arg);
|
||||
bits8 * bitshiftright (bits8 * arg, int shft);
|
||||
bits8 * bitshiftleft (bits8 * arg, int shft);
|
||||
bits8 * bitcat (bits8 *arg1, bits8 *arg2);
|
||||
bits8 * bitsubstr (bits8 *arg, int32 s, int32 l);
|
||||
bits8 *zpbitin(char *s, int dummy, int32 atttypmod);
|
||||
char *zpbitout(bits8 *s);
|
||||
char *zpbitsout(bits8 *s);
|
||||
bits8 *varbitin(char *s, int dummy, int32 atttypmod);
|
||||
bool biteq(bits8 *arg1, bits8 *arg2);
|
||||
bool bitne(bits8 *arg1, bits8 *arg2);
|
||||
bool bitge(bits8 *arg1, bits8 *arg2);
|
||||
bool bitgt(bits8 *arg1, bits8 *arg2);
|
||||
bool bitle(bits8 *arg1, bits8 *arg2);
|
||||
bool bitlt(bits8 *arg1, bits8 *arg2);
|
||||
int bitcmp(bits8 *arg1, bits8 *arg2);
|
||||
bits8 *bitand(bits8 *arg1, bits8 *arg2);
|
||||
bits8 *bitor(bits8 *arg1, bits8 *arg2);
|
||||
bits8 *bitxor(bits8 *arg1, bits8 *arg2);
|
||||
bits8 *bitnot(bits8 *arg);
|
||||
bits8 *bitshiftright(bits8 *arg, int shft);
|
||||
bits8 *bitshiftleft(bits8 *arg, int shft);
|
||||
bits8 *bitcat(bits8 *arg1, bits8 *arg2);
|
||||
bits8 *bitsubstr(bits8 *arg, int32 s, int32 l);
|
||||
|
|
|
@ -2,21 +2,23 @@
|
|||
|
||||
#include "varbit.h"
|
||||
|
||||
bits8 * varbit_in (char * s);
|
||||
char * varbit_out (bits8 *s);
|
||||
bits8 *varbit_in(char *s);
|
||||
char *varbit_out(bits8 *s);
|
||||
|
||||
bits8 *
|
||||
varbit_in (char * s) {
|
||||
return varbitin (s, 0, -1);
|
||||
varbit_in(char *s)
|
||||
{
|
||||
return varbitin(s, 0, -1);
|
||||
}
|
||||
|
||||
/*char *
|
||||
/*char *
|
||||
varbit_out (bits8 *s) {
|
||||
return zpbitout(s);
|
||||
}
|
||||
*/
|
||||
|
||||
char *
|
||||
varbit_out (bits8 *s) {
|
||||
return zpbitsout(s);
|
||||
char *
|
||||
varbit_out(bits8 *s)
|
||||
{
|
||||
return zpbitsout(s);
|
||||
}
|
||||
|
|
|
@ -2,173 +2,183 @@
|
|||
#include "varbit.h"
|
||||
#include <stdio.h>
|
||||
|
||||
void print_details (unsigned char *s);
|
||||
void print_details(unsigned char *s);
|
||||
|
||||
const int numb = 8;
|
||||
|
||||
const int numb = 8;
|
||||
/*
|
||||
const char *b[] = { "B0010", "B11011011", "B0001", "X3F12", "X27", "B",
|
||||
"X11", "B100111"};
|
||||
"X11", "B100111"};
|
||||
int atttypmod[] = {-1, -1, -1,-1,-1,-1,-1,-1 };
|
||||
*/
|
||||
const char *b[] = { "B0010", "B11011011", "B10001", "X3D12", "X27", "B",
|
||||
"X11", "B100111"};
|
||||
int atttypmod[] = { 7, 9, 6, 18, 11, 6, -1, -1 };
|
||||
const char *b[] = {"B0010", "B11011011", "B10001", "X3D12", "X27", "B",
|
||||
"X11", "B100111"};
|
||||
int atttypmod[] = {7, 9, 6, 18, 11, 6, -1, -1};
|
||||
|
||||
|
||||
void print_details (unsigned char *s)
|
||||
void
|
||||
print_details(unsigned char *s)
|
||||
{
|
||||
int i;
|
||||
printf ("Length in bytes : %d\n",VARSIZE(s));
|
||||
printf ("Length of bitstring: %d\n",VARBITLEN(s));
|
||||
for (i=8; i<VARSIZE(s); i++)
|
||||
printf ("%X%X ",s[i]>>4,s[i]&0xF);
|
||||
printf("\n");
|
||||
int i;
|
||||
|
||||
printf("Length in bytes : %d\n", VARSIZE(s));
|
||||
printf("Length of bitstring: %d\n", VARBITLEN(s));
|
||||
for (i = 8; i < VARSIZE(s); i++)
|
||||
printf("%X%X ", s[i] >> 4, s[i] & 0xF);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
int
|
||||
main ()
|
||||
main()
|
||||
{
|
||||
int i, j;
|
||||
char *s[numb];
|
||||
int i,
|
||||
j;
|
||||
char *s[numb];
|
||||
|
||||
for (i=0; i<numb; i++) {
|
||||
printf ("Input: %s\n",b[i]);
|
||||
s[i] = zpbitin(b[i], 0, atttypmod[i]);
|
||||
//print_details(s[i]);
|
||||
printf ("%s = %s\n",zpbitout(s[i]),zpbitsout(s[i]));
|
||||
}
|
||||
for (i = 0; i < numb; i++)
|
||||
{
|
||||
printf("Input: %s\n", b[i]);
|
||||
s[i] = zpbitin(b[i], 0, atttypmod[i]);
|
||||
//print_details(s[i]);
|
||||
printf("%s = %s\n", zpbitout(s[i]), zpbitsout(s[i]));
|
||||
}
|
||||
|
||||
printf ("\nCOMPARISONS:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s <=> %s = %d\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
bitcmp(s[i],s[j]));
|
||||
printf("\nCOMPARISONS:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s <=> %s = %d\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
bitcmp(s[i], s[j]));
|
||||
|
||||
printf ("\nCONCATENATION:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s || %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitcat(s[i],s[j])));
|
||||
printf("\nCONCATENATION:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s || %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitcat(s[i], s[j])));
|
||||
|
||||
printf("\nSUBSTR:\n");
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),1,8,
|
||||
zpbitsout(bitsubstr(s[3],1,8)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),9,8,
|
||||
zpbitsout(bitsubstr(s[3],9,8)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),1,9,
|
||||
zpbitsout(bitsubstr(s[3],1,9)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),3,5,
|
||||
zpbitsout(bitsubstr(s[3],3,5)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),3,9,
|
||||
zpbitsout(bitsubstr(s[3],3,9)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),3,17,
|
||||
zpbitsout(bitsubstr(s[3],3,17)));
|
||||
printf ("\nLOGICAL AND:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s & %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitand(s[i],s[j])));
|
||||
printf("\nSUBSTR:\n");
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 1, 8,
|
||||
zpbitsout(bitsubstr(s[3], 1, 8)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 9, 8,
|
||||
zpbitsout(bitsubstr(s[3], 9, 8)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 1, 9,
|
||||
zpbitsout(bitsubstr(s[3], 1, 9)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 3, 5,
|
||||
zpbitsout(bitsubstr(s[3], 3, 5)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 3, 9,
|
||||
zpbitsout(bitsubstr(s[3], 3, 9)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 3, 17,
|
||||
zpbitsout(bitsubstr(s[3], 3, 17)));
|
||||
printf("\nLOGICAL AND:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s & %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitand(s[i], s[j])));
|
||||
|
||||
printf ("\nLOGICAL OR:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s | %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitor(s[i],s[j])));
|
||||
printf("\nLOGICAL OR:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s | %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitor(s[i], s[j])));
|
||||
|
||||
printf ("\nLOGICAL XOR:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s ^ %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitxor(s[i],s[j])));
|
||||
|
||||
printf ("\nLOGICAL NOT:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
printf("~%s = %s\n",zpbitsout(s[i]),zpbitsout(bitnot(s[i])));
|
||||
printf("\nLOGICAL XOR:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s ^ %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitxor(s[i], s[j])));
|
||||
|
||||
printf("\nLOGICAL NOT:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
printf("~%s = %s\n", zpbitsout(s[i]), zpbitsout(bitnot(s[i])));
|
||||
|
||||
|
||||
printf ("\nSHIFT LEFT:\n");
|
||||
for (i=0; i<numb; i++) {
|
||||
printf("%s\n",zpbitsout(s[i]));
|
||||
for (j=0; j<=VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n",j,zpbitsout(bitshiftleft(s[i],j)));
|
||||
}
|
||||
printf("\nSHIFT LEFT:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
{
|
||||
printf("%s\n", zpbitsout(s[i]));
|
||||
for (j = 0; j <= VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n", j, zpbitsout(bitshiftleft(s[i], j)));
|
||||
}
|
||||
|
||||
printf ("\nSHIFT RIGHT:\n");
|
||||
for (i=0; i<numb; i++) {
|
||||
printf("%s\n",zpbitsout(s[i]));
|
||||
for (j=0; j<=VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n",j,zpbitsout(bitshiftright(s[i],j)));
|
||||
}
|
||||
printf("\nSHIFT RIGHT:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
{
|
||||
printf("%s\n", zpbitsout(s[i]));
|
||||
for (j = 0; j <= VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n", j, zpbitsout(bitshiftright(s[i], j)));
|
||||
}
|
||||
|
||||
printf ("\n\n ********** VARYING **********\n");
|
||||
for (i=0; i<numb; i++) {
|
||||
printf ("Input: %s\n",b[i]);
|
||||
s[i] = varbitin(b[i], 0, atttypmod[i]);
|
||||
/*print_details(s);*/
|
||||
printf ("%s\n",zpbitout(s[i]));
|
||||
printf ("%s\n",zpbitsout(s[i]));
|
||||
}
|
||||
printf("\n\n ********** VARYING **********\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
{
|
||||
printf("Input: %s\n", b[i]);
|
||||
s[i] = varbitin(b[i], 0, atttypmod[i]);
|
||||
/* print_details(s); */
|
||||
printf("%s\n", zpbitout(s[i]));
|
||||
printf("%s\n", zpbitsout(s[i]));
|
||||
}
|
||||
|
||||
printf ("\nCOMPARISONS:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s <=> %s = %d\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
bitcmp(s[i],s[j]));
|
||||
printf("\nCOMPARISONS:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s <=> %s = %d\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
bitcmp(s[i], s[j]));
|
||||
|
||||
printf ("\nCONCATENATION:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s || %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitcat(s[i],s[j])));
|
||||
printf("\nCONCATENATION:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s || %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitcat(s[i], s[j])));
|
||||
|
||||
printf("\nSUBSTR:\n");
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),1,8,
|
||||
zpbitsout(bitsubstr(s[3],1,8)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),9,8,
|
||||
zpbitsout(bitsubstr(s[3],9,8)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),1,9,
|
||||
zpbitsout(bitsubstr(s[3],1,9)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),3,5,
|
||||
zpbitsout(bitsubstr(s[3],3,5)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),3,9,
|
||||
zpbitsout(bitsubstr(s[3],3,9)));
|
||||
printf("%s (%d,%d) => %s (%s)\n",zpbitsout(s[3]),3,17,
|
||||
zpbitsout(bitsubstr(s[3],3,17)),zpbitsout(bitsubstr(s[3],3,17)));
|
||||
printf ("\nLOGICAL AND:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s & %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitand(s[i],s[j])));
|
||||
printf("\nSUBSTR:\n");
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 1, 8,
|
||||
zpbitsout(bitsubstr(s[3], 1, 8)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 9, 8,
|
||||
zpbitsout(bitsubstr(s[3], 9, 8)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 1, 9,
|
||||
zpbitsout(bitsubstr(s[3], 1, 9)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 3, 5,
|
||||
zpbitsout(bitsubstr(s[3], 3, 5)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 3, 9,
|
||||
zpbitsout(bitsubstr(s[3], 3, 9)));
|
||||
printf("%s (%d,%d) => %s (%s)\n", zpbitsout(s[3]), 3, 17,
|
||||
zpbitsout(bitsubstr(s[3], 3, 17)), zpbitsout(bitsubstr(s[3], 3, 17)));
|
||||
printf("\nLOGICAL AND:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s & %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitand(s[i], s[j])));
|
||||
|
||||
printf ("\nLOGICAL OR:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s | %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitor(s[i],s[j])));
|
||||
printf("\nLOGICAL OR:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s | %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitor(s[i], s[j])));
|
||||
|
||||
printf ("\nLOGICAL XOR:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s ^ %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitxor(s[i],s[j])));
|
||||
|
||||
printf ("\nLOGICAL NOT:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
printf("~%s = %s\n",zpbitsout(s[i]),zpbitsout(bitnot(s[i])));
|
||||
printf("\nLOGICAL XOR:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s ^ %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitxor(s[i], s[j])));
|
||||
|
||||
printf("\nLOGICAL NOT:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
printf("~%s = %s\n", zpbitsout(s[i]), zpbitsout(bitnot(s[i])));
|
||||
|
||||
|
||||
printf ("\nSHIFT LEFT:\n");
|
||||
for (i=0; i<numb; i++) {
|
||||
printf("%s\n",zpbitsout(s[i]));
|
||||
for (j=0; j<=VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n",j,zpbitsout(bitshiftleft(s[i],j)));
|
||||
}
|
||||
printf("\nSHIFT LEFT:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
{
|
||||
printf("%s\n", zpbitsout(s[i]));
|
||||
for (j = 0; j <= VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n", j, zpbitsout(bitshiftleft(s[i], j)));
|
||||
}
|
||||
|
||||
printf ("\nSHIFT RIGHT:\n");
|
||||
for (i=0; i<numb; i++) {
|
||||
printf("%s\n",zpbitsout(s[i]));
|
||||
for (j=0; j<=VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n",j,zpbitsout(bitshiftright(s[i],j)));
|
||||
}
|
||||
printf("\nSHIFT RIGHT:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
{
|
||||
printf("%s\n", zpbitsout(s[i]));
|
||||
for (j = 0; j <= VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n", j, zpbitsout(bitshiftright(s[i], j)));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@
|
|||
* Decode time string 00:00:00 through 24:00:00.
|
||||
*/
|
||||
static int
|
||||
decode_24h_time(char *str, struct tm *tm, double *fsec)
|
||||
decode_24h_time(char *str, struct tm * tm, double *fsec)
|
||||
{
|
||||
char *cp;
|
||||
|
||||
|
@ -51,9 +51,7 @@ decode_24h_time(char *str, struct tm *tm, double *fsec)
|
|||
*fsec = 0;
|
||||
}
|
||||
else if (*cp != ':')
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
str = cp + 1;
|
||||
|
@ -72,10 +70,10 @@ decode_24h_time(char *str, struct tm *tm, double *fsec)
|
|||
}
|
||||
|
||||
/* do a sanity check */
|
||||
if ( (tm->tm_hour < 0) || (tm->tm_hour > 24)
|
||||
|| (tm->tm_min < 0) || (tm->tm_min > 59)
|
||||
|| (tm->tm_sec < 0) || (tm->tm_sec > 59)
|
||||
|| (*fsec < 0) )
|
||||
if ((tm->tm_hour < 0) || (tm->tm_hour > 24)
|
||||
|| (tm->tm_min < 0) || (tm->tm_min > 59)
|
||||
|| (tm->tm_sec < 0) || (tm->tm_sec > 59)
|
||||
|| (*fsec < 0))
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
|
@ -265,7 +263,7 @@ currentdate()
|
|||
int4
|
||||
date2mjd(DateADT val)
|
||||
{
|
||||
int result;
|
||||
int result;
|
||||
|
||||
result = val + JDATE_2000 - 2400000.5;
|
||||
|
||||
|
@ -276,8 +274,8 @@ date2mjd(DateADT val)
|
|||
|
||||
/*
|
||||
* Local Variables:
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
*/
|
||||
|
|
|
@ -20,8 +20,8 @@ DateADT currentdate(void);
|
|||
|
||||
/*
|
||||
* Local Variables:
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
*/
|
||||
|
|
|
@ -38,6 +38,7 @@ extern int assertTest(int val);
|
|||
|
||||
#ifdef ASSERT_CHECKING_TEST
|
||||
extern int assertEnable(int val);
|
||||
|
||||
#endif
|
||||
|
||||
int
|
||||
|
@ -84,7 +85,8 @@ active_listeners(text *relname)
|
|||
ScanKeyData key;
|
||||
Datum d;
|
||||
bool isnull;
|
||||
int len, pid;
|
||||
int len,
|
||||
pid;
|
||||
int count = 0;
|
||||
int ourpid = getpid();
|
||||
char listen_name[NAMEDATALEN];
|
||||
|
@ -92,8 +94,9 @@ active_listeners(text *relname)
|
|||
lRel = heap_openr(ListenerRelationName, AccessShareLock);
|
||||
tdesc = RelationGetDescr(lRel);
|
||||
|
||||
if (relname && (VARSIZE(relname) > VARHDRSZ)) {
|
||||
len = MIN(VARSIZE(relname)-VARHDRSZ, NAMEDATALEN-1);
|
||||
if (relname && (VARSIZE(relname) > VARHDRSZ))
|
||||
{
|
||||
len = MIN(VARSIZE(relname) - VARHDRSZ, NAMEDATALEN - 1);
|
||||
strncpy(listen_name, VARDATA(relname), len);
|
||||
listen_name[len] = '\0';
|
||||
ScanKeyEntryInitialize(&key, 0,
|
||||
|
@ -101,15 +104,16 @@ active_listeners(text *relname)
|
|||
F_NAMEEQ,
|
||||
PointerGetDatum(listen_name));
|
||||
sRel = heap_beginscan(lRel, 0, SnapshotNow, 1, &key);
|
||||
} else {
|
||||
sRel = heap_beginscan(lRel, 0, SnapshotNow, 0, (ScanKey)NULL);
|
||||
}
|
||||
else
|
||||
sRel = heap_beginscan(lRel, 0, SnapshotNow, 0, (ScanKey) NULL);
|
||||
|
||||
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0)))
|
||||
{
|
||||
d = heap_getattr(lTuple, Anum_pg_listener_pid, tdesc, &isnull);
|
||||
pid = DatumGetInt32(d);
|
||||
if ((pid == ourpid) || (kill(pid, SIGTSTP) == 0)) {
|
||||
if ((pid == ourpid) || (kill(pid, SIGTSTP) == 0))
|
||||
{
|
||||
/* elog(NOTICE, "%d ok", pid); */
|
||||
count++;
|
||||
}
|
||||
|
@ -134,6 +138,7 @@ assert_test(int val)
|
|||
{
|
||||
return assertTest(val);
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -141,8 +146,8 @@ assert_test(int val)
|
|||
|
||||
/*
|
||||
* Local Variables:
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
*/
|
||||
|
|
|
@ -10,8 +10,10 @@ int active_listeners(text *relname);
|
|||
|
||||
#ifdef USE_ASSERT_CHECKING
|
||||
int assert_enable(int val);
|
||||
|
||||
#ifdef ASSERT_CHECKING_TEST
|
||||
int assert_test(int val);
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -19,8 +21,8 @@ int assert_test(int val);
|
|||
|
||||
/*
|
||||
* Local Variables:
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
*/
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
* DEF_PGPORT is the TCP port number on which the Postmaster listens by
|
||||
* default. This can be overriden by command options, environment variables,
|
||||
* and the postconfig hook. (set by build script)
|
||||
*/
|
||||
*/
|
||||
|
||||
#define DEF_PGPORT "5432"
|
||||
|
||||
|
|
File diff suppressed because it is too large.
@ -23,7 +23,7 @@
|
|||
#define ISO8859
|
||||
|
||||
#define MIN(x, y) ((x) < (y) ? (x) : (y))
|
||||
#define VALUE(char) ((char) - '0')
|
||||
#define VALUE(char) ((char) - '0')
|
||||
#define DIGIT(val) ((val) + '0')
|
||||
#define ISOCTAL(c) (((c) >= '0') && ((c) <= '7'))
|
||||
#ifndef ISO8859
|
||||
|
@ -89,9 +89,8 @@ string_output(unsigned char *data, int size)
|
|||
break;
|
||||
case '{':
|
||||
/* Escape beginning of string, to distinguish from arrays */
|
||||
if (p == data) {
|
||||
if (p == data)
|
||||
len++;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
if (NOTPRINTABLE(*p))
|
||||
|
@ -137,9 +136,8 @@ string_output(unsigned char *data, int size)
|
|||
break;
|
||||
case '{':
|
||||
/* Escape beginning of string, to distinguish from arrays */
|
||||
if (p == data) {
|
||||
if (p == data)
|
||||
*r++ = '\\';
|
||||
}
|
||||
*r++ = c;
|
||||
break;
|
||||
default:
|
||||
|
@ -361,14 +359,15 @@ c_charin(unsigned char *str)
|
|||
{
|
||||
return (string_input(str, 1, 0, NULL));
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* end of file */
|
||||
|
||||
/*
|
||||
* Local Variables:
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
*/
|
||||
|
|
|
@ -1,24 +1,25 @@
|
|||
#ifndef STRING_IO_H
|
||||
#define STRING_IO_H
|
||||
|
||||
unsigned char* string_output(unsigned char *data, int size);
|
||||
unsigned char* string_input(unsigned char *str, int size, int hdrsize,
|
||||
int *rtn_size);
|
||||
unsigned char* c_charout(int32 c);
|
||||
unsigned char* c_textout(struct varlena * vlena);
|
||||
unsigned char* c_varcharout(unsigned char *s);
|
||||
unsigned char *string_output(unsigned char *data, int size);
|
||||
unsigned char *string_input(unsigned char *str, int size, int hdrsize,
|
||||
int *rtn_size);
|
||||
unsigned char *c_charout(int32 c);
|
||||
unsigned char *c_textout(struct varlena * vlena);
|
||||
unsigned char *c_varcharout(unsigned char *s);
|
||||
|
||||
#if 0
|
||||
struct varlena* c_textin(unsigned char *str);
|
||||
int32* c_charin(unsigned char *str)
|
||||
struct varlena *c_textin(unsigned char *str);
|
||||
int32 *
|
||||
c_charin(unsigned char *str)
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Local Variables:
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
*/
|
||||
|
|
|
@ -96,8 +96,8 @@ user_unlock_all()
|
|||
|
||||
/*
|
||||
* Local Variables:
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
*/
|
||||
|
|
|
@ -13,8 +13,8 @@ int user_unlock_all(void);
|
|||
|
||||
/*
|
||||
* Local Variables:
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* tab-width: 4
|
||||
* c-indent-level: 4
|
||||
* c-basic-offset: 4
|
||||
* End:
|
||||
*/
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.61 2000/01/26 05:55:53 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.62 2000/04/12 17:14:36 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* The old interface functions have been converted to macros
|
||||
|
@ -137,9 +137,9 @@ DataFill(char *data,
|
|||
*((int32 *) value[i]));
|
||||
break;
|
||||
default:
|
||||
Assert(att[i]->attlen >= 0);
|
||||
memmove(data, DatumGetPointer(value[i]),
|
||||
(size_t)(att[i]->attlen));
|
||||
Assert(att[i]->attlen >= 0);
|
||||
memmove(data, DatumGetPointer(value[i]),
|
||||
(size_t) (att[i]->attlen));
|
||||
break;
|
||||
}
|
||||
data = (char *) att_addlength((long) data, att[i]->attlen, value[i]);
|
||||
|
@ -326,7 +326,7 @@ nocachegetattr(HeapTuple tuple,
|
|||
Form_pg_attribute *att = tupleDesc->attrs;
|
||||
int slow = 0; /* do we have to walk nulls? */
|
||||
|
||||
(void)isnull; /*not used*/
|
||||
(void) isnull; /* not used */
|
||||
#ifdef IN_MACRO
|
||||
/* This is handled in the macro */
|
||||
Assert(attnum > 0);
|
||||
|
@ -681,7 +681,7 @@ heap_formtuple(TupleDesc tupleDescriptor,
|
|||
len += bitmaplen;
|
||||
}
|
||||
|
||||
hoff = len = MAXALIGN(len); /* be conservative here */
|
||||
hoff = len = MAXALIGN(len); /* be conservative here */
|
||||
|
||||
len += ComputeDataSize(tupleDescriptor, value, nulls);
|
||||
|
||||
|
@ -806,11 +806,9 @@ void
|
|||
heap_freetuple(HeapTuple htup)
|
||||
{
|
||||
if (htup->t_data != NULL)
|
||||
if (htup->t_datamcxt != NULL && (char *)(htup->t_data) !=
|
||||
((char *) htup + HEAPTUPLESIZE))
|
||||
{
|
||||
if (htup->t_datamcxt != NULL && (char *) (htup->t_data) !=
|
||||
((char *) htup + HEAPTUPLESIZE))
|
||||
elog(NOTICE, "TELL Jan Wieck: heap_freetuple() found separate t_data");
|
||||
}
|
||||
|
||||
pfree(htup);
|
||||
}
|
||||
|
@ -835,7 +833,7 @@ heap_addheader(uint32 natts, /* max domain index */
|
|||
|
||||
len = offsetof(HeapTupleHeaderData, t_bits);
|
||||
|
||||
hoff = len = MAXALIGN(len); /* be conservative */
|
||||
hoff = len = MAXALIGN(len); /* be conservative */
|
||||
len += structlen;
|
||||
tuple = (HeapTuple) palloc(HEAPTUPLESIZE + len);
|
||||
tuple->t_datamcxt = CurrentMemoryContext;
|
||||
|
@ -850,8 +848,8 @@ heap_addheader(uint32 natts, /* max domain index */
|
|||
td->t_infomask = 0;
|
||||
td->t_infomask |= HEAP_XMAX_INVALID;
|
||||
|
||||
if (structlen > 0)
|
||||
memmove((char *) td + hoff, structure, (size_t)structlen);
|
||||
if (structlen > 0)
|
||||
memmove((char *) td + hoff, structure, (size_t) structlen);
|
||||
|
||||
return tuple;
|
||||
}
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.42 2000/01/26 05:55:53 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.43 2000/04/12 17:14:37 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -60,7 +60,7 @@ index_formtuple(TupleDesc tupleDescriptor,
|
|||
|
||||
hoff = IndexInfoFindDataOffset(infomask);
|
||||
size = hoff + ComputeDataSize(tupleDescriptor, value, null);
|
||||
size = MAXALIGN(size); /* be conservative */
|
||||
size = MAXALIGN(size); /* be conservative */
|
||||
|
||||
tp = (char *) palloc(size);
|
||||
tuple = (IndexTuple) tp;
|
||||
|
@ -134,7 +134,7 @@ nocache_index_getattr(IndexTuple tup,
|
|||
int data_off; /* tuple data offset */
|
||||
Form_pg_attribute *att = tupleDesc->attrs;
|
||||
|
||||
(void)isnull;
|
||||
(void) isnull;
|
||||
/* ----------------
|
||||
* sanity checks
|
||||
* ----------------
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.61 2000/01/31 04:35:48 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.62 2000/04/12 17:14:37 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* some of the executor utility code such as "ExecTypeFromTL" should be
|
||||
|
@ -229,17 +229,19 @@ FreeTupleDesc(TupleDesc tupdesc)
|
|||
bool
|
||||
equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
|
||||
{
|
||||
int i;
|
||||
int i;
|
||||
|
||||
if (tupdesc1->natts != tupdesc2->natts)
|
||||
return false;
|
||||
for (i = 0; i < tupdesc1->natts; i++)
|
||||
{
|
||||
Form_pg_attribute attr1 = tupdesc1->attrs[i];
|
||||
Form_pg_attribute attr2 = tupdesc2->attrs[i];
|
||||
Form_pg_attribute attr1 = tupdesc1->attrs[i];
|
||||
Form_pg_attribute attr2 = tupdesc2->attrs[i];
|
||||
|
||||
/* We do not need to check every single field here, and in fact
|
||||
* some fields such as attdisbursion probably shouldn't be compared.
|
||||
/*
|
||||
* We do not need to check every single field here, and in fact
|
||||
* some fields such as attdisbursion probably shouldn't be
|
||||
* compared.
|
||||
*/
|
||||
if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0)
|
||||
return false;
|
||||
|
@ -254,8 +256,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
|
|||
}
|
||||
if (tupdesc1->constr != NULL)
|
||||
{
|
||||
TupleConstr *constr1 = tupdesc1->constr;
|
||||
TupleConstr *constr2 = tupdesc2->constr;
|
||||
TupleConstr *constr1 = tupdesc1->constr;
|
||||
TupleConstr *constr2 = tupdesc2->constr;
|
||||
|
||||
if (constr2 == NULL)
|
||||
return false;
|
||||
|
@ -263,8 +265,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
|
|||
return false;
|
||||
for (i = 0; i < (int) constr1->num_defval; i++)
|
||||
{
|
||||
AttrDefault *defval1 = constr1->defval + i;
|
||||
AttrDefault *defval2 = constr2->defval + i;
|
||||
AttrDefault *defval1 = constr1->defval + i;
|
||||
AttrDefault *defval2 = constr2->defval + i;
|
||||
|
||||
if (defval1->adnum != defval2->adnum)
|
||||
return false;
|
||||
|
@ -275,8 +277,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
|
|||
return false;
|
||||
for (i = 0; i < (int) constr1->num_check; i++)
|
||||
{
|
||||
ConstrCheck *check1 = constr1->check + i;
|
||||
ConstrCheck *check2 = constr2->check + i;
|
||||
ConstrCheck *check1 = constr1->check + i;
|
||||
ConstrCheck *check2 = constr2->check + i;
|
||||
|
||||
if (strcmp(check1->ccname, check2->ccname) != 0)
|
||||
return false;
|
||||
|
@ -585,8 +587,9 @@ BuildDescForRelation(List *schema, char *relname)
|
|||
constr->has_not_null = true;
|
||||
desc->attrs[attnum - 1]->attnotnull = entry->is_not_null;
|
||||
|
||||
/* Note we copy only pre-cooked default expressions.
|
||||
* Digestion of raw ones is someone else's problem.
|
||||
/*
|
||||
* Note we copy only pre-cooked default expressions. Digestion of
|
||||
* raw ones is someone else's problem.
|
||||
*/
|
||||
if (entry->cooked_default != NULL)
|
||||
{
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.52 2000/03/17 02:36:00 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.53 2000/04/12 17:14:39 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -52,8 +52,10 @@ void gistdelete(Relation r, ItemPointer tid);
|
|||
static IndexTuple gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t);
|
||||
static void gistcentryinit(GISTSTATE *giststate, GISTENTRY *e, char *pr,
|
||||
Relation r, Page pg, OffsetNumber o, int b, bool l);
|
||||
|
||||
#ifdef GISTDEBUG
|
||||
static char *int_range_out(INTRANGE *r);
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -98,7 +100,7 @@ gistbuild(Relation heap,
|
|||
|
||||
/* no locking is needed */
|
||||
|
||||
CommandCounterIncrement(); /* so we can see the new pg_index tuple */
|
||||
CommandCounterIncrement(); /* so we can see the new pg_index tuple */
|
||||
|
||||
initGISTstate(&giststate, index);
|
||||
|
||||
|
@ -186,7 +188,7 @@ gistbuild(Relation heap,
|
|||
#ifndef OMIT_PARTIAL_INDEX
|
||||
/* SetSlotContents(slot, htup); */
|
||||
slot->val = htup;
|
||||
if (! ExecQual((List *) pred, econtext, false))
|
||||
if (!ExecQual((List *) pred, econtext, false))
|
||||
continue;
|
||||
#endif /* OMIT_PARTIAL_INDEX */
|
||||
}
|
||||
|
@ -272,18 +274,18 @@ gistbuild(Relation heap,
|
|||
/*
|
||||
* Since we just counted the tuples in the heap, we update its stats
|
||||
* in pg_class to guarantee that the planner takes advantage of the
|
||||
* index we just created. But, only update statistics during
|
||||
* normal index definitions, not for indices on system catalogs
|
||||
* created during bootstrap processing. We must close the relations
|
||||
* before updating statistics to guarantee that the relcache entries
|
||||
* are flushed when we increment the command counter in UpdateStats().
|
||||
* But we do not release any locks on the relations; those will be
|
||||
* held until end of transaction.
|
||||
* index we just created. But, only update statistics during normal
|
||||
* index definitions, not for indices on system catalogs created
|
||||
* during bootstrap processing. We must close the relations before
|
||||
* updating statistics to guarantee that the relcache entries are
|
||||
* flushed when we increment the command counter in UpdateStats(). But
|
||||
* we do not release any locks on the relations; those will be held
|
||||
* until end of transaction.
|
||||
*/
|
||||
if (IsNormalProcessingMode())
|
||||
{
|
||||
Oid hrelid = RelationGetRelid(heap);
|
||||
Oid irelid = RelationGetRelid(index);
|
||||
Oid hrelid = RelationGetRelid(heap);
|
||||
Oid irelid = RelationGetRelid(index);
|
||||
bool inplace = IsReindexProcessing();
|
||||
|
||||
heap_close(heap, NoLock);
|
||||
|
|
|
@ -266,7 +266,7 @@ gistdropscan(IndexScanDesc s)
|
|||
prev = l;
|
||||
|
||||
if (l == (GISTScanList) NULL)
|
||||
elog(ERROR, "GiST scan list corrupted -- cannot find 0x%p", (void*)s);
|
||||
elog(ERROR, "GiST scan list corrupted -- cannot find 0x%p", (void *) s);
|
||||
|
||||
if (prev == (GISTScanList) NULL)
|
||||
GISTScans = l->gsl_next;
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.36 2000/03/01 05:39:22 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.37 2000/04/12 17:14:43 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* This file contains only the public interface routines.
|
||||
|
@ -149,7 +149,7 @@ hashbuild(Relation heap,
|
|||
#ifndef OMIT_PARTIAL_INDEX
|
||||
/* SetSlotContents(slot, htup); */
|
||||
slot->val = htup;
|
||||
if (! ExecQual((List *) pred, econtext, false))
|
||||
if (!ExecQual((List *) pred, econtext, false))
|
||||
continue;
|
||||
#endif /* OMIT_PARTIAL_INDEX */
|
||||
}
|
||||
|
@ -230,18 +230,18 @@ hashbuild(Relation heap,
|
|||
/*
|
||||
* Since we just counted the tuples in the heap, we update its stats
|
||||
* in pg_class to guarantee that the planner takes advantage of the
|
||||
* index we just created. But, only update statistics during
|
||||
* normal index definitions, not for indices on system catalogs
|
||||
* created during bootstrap processing. We must close the relations
|
||||
* before updating statistics to guarantee that the relcache entries
|
||||
* are flushed when we increment the command counter in UpdateStats().
|
||||
* But we do not release any locks on the relations; those will be
|
||||
* held until end of transaction.
|
||||
* index we just created. But, only update statistics during normal
|
||||
* index definitions, not for indices on system catalogs created
|
||||
* during bootstrap processing. We must close the relations before
|
||||
* updating statistics to guarantee that the relcache entries are
|
||||
* flushed when we increment the command counter in UpdateStats(). But
|
||||
* we do not release any locks on the relations; those will be held
|
||||
* until end of transaction.
|
||||
*/
|
||||
if (IsNormalProcessingMode())
|
||||
{
|
||||
Oid hrelid = RelationGetRelid(heap);
|
||||
Oid irelid = RelationGetRelid(index);
|
||||
Oid hrelid = RelationGetRelid(heap);
|
||||
Oid irelid = RelationGetRelid(index);
|
||||
bool inplace = IsReindexProcessing();
|
||||
|
||||
heap_close(heap, NoLock);
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.24 2000/02/21 03:36:46 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.25 2000/04/12 17:14:44 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* These functions are stored in pg_amproc. For each operator class
|
||||
|
@ -146,14 +146,14 @@ hashoidvector(Oid *key)
|
|||
int i;
|
||||
uint32 result = 0;
|
||||
|
||||
for (i = INDEX_MAX_KEYS; --i >= 0; )
|
||||
for (i = INDEX_MAX_KEYS; --i >= 0;)
|
||||
result = (result << 1) ^ (~(uint32) key[i]);
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: hashint2vector currently can't be used as a user hash table
|
||||
* hash function, because it has no pg_proc entry. We only need it
|
||||
* hash function, because it has no pg_proc entry. We only need it
|
||||
* for catcache indexing.
|
||||
*/
|
||||
uint32
|
||||
|
@ -162,7 +162,7 @@ hashint2vector(int16 *key)
|
|||
int i;
|
||||
uint32 result = 0;
|
||||
|
||||
for (i = INDEX_MAX_KEYS; --i >= 0; )
|
||||
for (i = INDEX_MAX_KEYS; --i >= 0;)
|
||||
result = (result << 1) ^ (~(uint32) key[i]);
|
||||
return result;
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.22 2000/01/26 05:55:55 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.23 2000/04/12 17:14:44 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* Because we can be doing an index scan on a relation while we
|
||||
|
@ -75,7 +75,7 @@ _hash_dropscan(IndexScanDesc scan)
|
|||
last = chk;
|
||||
|
||||
if (chk == (HashScanList) NULL)
|
||||
elog(ERROR, "hash scan list trashed; can't find 0x%p", (void*)scan);
|
||||
elog(ERROR, "hash scan list trashed; can't find 0x%p", (void *) scan);
|
||||
|
||||
if (last == (HashScanList) NULL)
|
||||
HashScans = chk->hashsl_next;
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.23 2000/03/17 02:36:02 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.24 2000/04/12 17:14:44 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -351,7 +351,7 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir, Buffer metabuf)
|
|||
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
|
||||
Assert(opaque->hasho_bucket == bucket);
|
||||
while (PageIsEmpty(page) &&
|
||||
BlockNumberIsValid(opaque->hasho_nextblkno))
|
||||
BlockNumberIsValid(opaque->hasho_nextblkno))
|
||||
_hash_readnext(rel, &buf, &page, &opaque);
|
||||
maxoff = PageGetMaxOffsetNumber(page);
|
||||
offnum = FirstOffsetNumber;
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.66 2000/02/09 03:49:47 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.67 2000/04/12 17:14:45 momjian Exp $
|
||||
*
|
||||
*
|
||||
* INTERFACE ROUTINES
|
||||
|
@ -23,7 +23,7 @@
|
|||
* heap_fetch - retrive tuple with tid
|
||||
* heap_insert - insert tuple into a relation
|
||||
* heap_delete - delete a tuple from a relation
|
||||
* heap_update - replace a tuple in a relation with another tuple
|
||||
* heap_update - replace a tuple in a relation with another tuple
|
||||
* heap_markpos - mark scan position
|
||||
* heap_restrpos - restore position to marked location
|
||||
*
|
||||
|
@ -120,9 +120,9 @@ initscan(HeapScanDesc scan,
|
|||
* ----------------
|
||||
*/
|
||||
scan->rs_ntup.t_datamcxt = scan->rs_ctup.t_datamcxt =
|
||||
scan->rs_ptup.t_datamcxt = NULL;
|
||||
scan->rs_ptup.t_datamcxt = NULL;
|
||||
scan->rs_ntup.t_data = scan->rs_ctup.t_data =
|
||||
scan->rs_ptup.t_data = NULL;
|
||||
scan->rs_ptup.t_data = NULL;
|
||||
scan->rs_nbuf = scan->rs_cbuf = scan->rs_pbuf = InvalidBuffer;
|
||||
}
|
||||
else if (atend)
|
||||
|
@ -188,8 +188,9 @@ unpinscan(HeapScanDesc scan)
|
|||
if (BufferIsValid(scan->rs_nbuf))
|
||||
ReleaseBuffer(scan->rs_nbuf);
|
||||
|
||||
/* we don't bother to clear rs_pbuf etc --- caller must
|
||||
* reinitialize them if scan descriptor is not being deleted.
|
||||
/*
|
||||
* we don't bother to clear rs_pbuf etc --- caller must reinitialize
|
||||
* them if scan descriptor is not being deleted.
|
||||
*/
|
||||
}
|
||||
|
||||
|
@ -544,7 +545,7 @@ heap_open(Oid relationId, LOCKMODE lockmode)
|
|||
if (lockmode == NoLock)
|
||||
return r; /* caller must check RelationIsValid! */
|
||||
|
||||
if (! RelationIsValid(r))
|
||||
if (!RelationIsValid(r))
|
||||
elog(ERROR, "Relation %u does not exist", relationId);
|
||||
|
||||
LockRelation(r, lockmode);
|
||||
|
@ -586,7 +587,7 @@ heap_openr(const char *relationName, LOCKMODE lockmode)
|
|||
if (lockmode == NoLock)
|
||||
return r; /* caller must check RelationIsValid! */
|
||||
|
||||
if (! RelationIsValid(r))
|
||||
if (!RelationIsValid(r))
|
||||
elog(ERROR, "Relation '%s' does not exist", relationName);
|
||||
|
||||
LockRelation(r, lockmode);
|
||||
|
@ -646,7 +647,7 @@ heap_beginscan(Relation relation,
|
|||
* sanity checks
|
||||
* ----------------
|
||||
*/
|
||||
if (! RelationIsValid(relation))
|
||||
if (!RelationIsValid(relation))
|
||||
elog(ERROR, "heap_beginscan: !RelationIsValid(relation)");
|
||||
|
||||
/* ----------------
|
||||
|
@ -659,7 +660,7 @@ heap_beginscan(Relation relation,
|
|||
* Acquire AccessShareLock for the duration of the scan
|
||||
*
|
||||
* Note: we could get an SI inval message here and consequently have
|
||||
* to rebuild the relcache entry. The refcount increment above
|
||||
* to rebuild the relcache entry. The refcount increment above
|
||||
* ensures that we will rebuild it and not just flush it...
|
||||
* ----------------
|
||||
*/
|
||||
|
@ -681,6 +682,7 @@ heap_beginscan(Relation relation,
|
|||
scan->rs_nkeys = (short) nkeys;
|
||||
|
||||
if (nkeys)
|
||||
|
||||
/*
|
||||
* we do this here instead of in initscan() because heap_rescan
|
||||
* also calls initscan() and we don't want to allocate memory
|
||||
|
@ -847,9 +849,7 @@ heap_getnext(HeapScanDesc scandesc, int backw)
|
|||
|
||||
if (scan->rs_ptup.t_data == scan->rs_ctup.t_data &&
|
||||
BufferIsInvalid(scan->rs_pbuf))
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy the "current" tuple/buffer to "next". Pin/unpin the
|
||||
|
@ -1095,8 +1095,10 @@ heap_fetch(Relation relation,
|
|||
}
|
||||
else
|
||||
{
|
||||
/* All checks passed, so return the tuple as valid.
|
||||
* Caller is now responsible for releasing the buffer.
|
||||
|
||||
/*
|
||||
* All checks passed, so return the tuple as valid. Caller is now
|
||||
* responsible for releasing the buffer.
|
||||
*/
|
||||
*userbuf = buffer;
|
||||
}
|
||||
|
@ -1109,17 +1111,18 @@ heap_fetch(Relation relation,
|
|||
*/
|
||||
ItemPointer
|
||||
heap_get_latest_tid(Relation relation,
|
||||
Snapshot snapshot,
|
||||
ItemPointer tid)
|
||||
Snapshot snapshot,
|
||||
ItemPointer tid)
|
||||
{
|
||||
ItemId lp = NULL;
|
||||
Buffer buffer;
|
||||
PageHeader dp;
|
||||
OffsetNumber offnum;
|
||||
HeapTupleData tp;
|
||||
HeapTupleHeader t_data;
|
||||
ItemPointerData ctid;
|
||||
bool invalidBlock,linkend;
|
||||
OffsetNumber offnum;
|
||||
HeapTupleData tp;
|
||||
HeapTupleHeader t_data;
|
||||
ItemPointerData ctid;
|
||||
bool invalidBlock,
|
||||
linkend;
|
||||
|
||||
/* ----------------
|
||||
* get the buffer from the relation descriptor
|
||||
|
@ -1149,11 +1152,11 @@ heap_get_latest_tid(Relation relation,
|
|||
invalidBlock = false;
|
||||
}
|
||||
if (invalidBlock)
|
||||
{
|
||||
{
|
||||
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
|
||||
ReleaseBuffer(buffer);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* ----------------
|
||||
* more sanity checks
|
||||
|
@ -1175,7 +1178,7 @@ heap_get_latest_tid(Relation relation,
|
|||
snapshot, 0, (ScanKey) NULL);
|
||||
|
||||
linkend = true;
|
||||
if ((t_data->t_infomask & HEAP_XMAX_COMMITTED) &&
|
||||
if ((t_data->t_infomask & HEAP_XMAX_COMMITTED) &&
|
||||
!ItemPointerEquals(tid, &ctid))
|
||||
linkend = false;
|
||||
|
||||
|
@ -1186,7 +1189,7 @@ heap_get_latest_tid(Relation relation,
|
|||
{
|
||||
if (linkend)
|
||||
return NULL;
|
||||
return heap_get_latest_tid(relation, snapshot, &ctid);
|
||||
return heap_get_latest_tid(relation, snapshot, &ctid);
|
||||
}
|
||||
|
||||
return tid;
|
||||
|
@ -1300,10 +1303,11 @@ l1:
|
|||
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
|
||||
if (TransactionIdDidAbort(xwait))
|
||||
goto l1;
|
||||
/*
|
||||
* xwait is committed but if xwait had just marked
|
||||
* the tuple for update then some other xaction could
|
||||
* update this tuple before we got to this point.
|
||||
|
||||
/*
|
||||
* xwait is committed but if xwait had just marked the tuple for
|
||||
* update then some other xaction could update this tuple before
|
||||
* we got to this point.
|
||||
*/
|
||||
if (tp.t_data->t_xmax != xwait)
|
||||
goto l1;
|
||||
|
@ -1345,11 +1349,11 @@ l1:
|
|||
}
|
||||
|
||||
/*
|
||||
* heap_update - replace a tuple
|
||||
* heap_update - replace a tuple
|
||||
*/
|
||||
int
|
||||
heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
|
||||
ItemPointer ctid)
|
||||
ItemPointer ctid)
|
||||
{
|
||||
ItemId lp;
|
||||
HeapTupleData oldtup;
|
||||
|
@ -1396,10 +1400,11 @@ l2:
|
|||
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
|
||||
if (TransactionIdDidAbort(xwait))
|
||||
goto l2;
|
||||
/*
|
||||
* xwait is committed but if xwait had just marked
|
||||
* the tuple for update then some other xaction could
|
||||
* update this tuple before we got to this point.
|
||||
|
||||
/*
|
||||
* xwait is committed but if xwait had just marked the tuple for
|
||||
* update then some other xaction could update this tuple before
|
||||
* we got to this point.
|
||||
*/
|
||||
if (oldtup.t_data->t_xmax != xwait)
|
||||
goto l2;
|
||||
|
@ -1521,10 +1526,11 @@ l3:
|
|||
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
|
||||
if (TransactionIdDidAbort(xwait))
|
||||
goto l3;
|
||||
/*
|
||||
* xwait is committed but if xwait had just marked
|
||||
* the tuple for update then some other xaction could
|
||||
* update this tuple before we got to this point.
|
||||
|
||||
/*
|
||||
* xwait is committed but if xwait had just marked the tuple for
|
||||
* update then some other xaction could update this tuple before
|
||||
* we got to this point.
|
||||
*/
|
||||
if (tuple->t_data->t_xmax != xwait)
|
||||
goto l3;
|
||||
|
|
|
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Id: hio.c,v 1.30 2000/03/17 02:36:02 tgl Exp $
* $Id: hio.c,v 1.31 2000/04/12 17:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/

@@ -51,7 +51,7 @@ RelationPutHeapTuple(Relation relation,
IncrHeapAccessStat(global_RelationPutHeapTuple);

pageHeader = (Page) BufferGetPage(buffer);
len = MAXALIGN(tuple->t_len); /* be conservative */
len = MAXALIGN(tuple->t_len); /* be conservative */
Assert(len <= PageGetFreeSpace(pageHeader));

offnum = PageAddItem((Page) pageHeader, (Item) tuple->t_data,

@@ -108,11 +108,11 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
ItemId itemId;
Item item;

len = MAXALIGN(tuple->t_len); /* be conservative */
len = MAXALIGN(tuple->t_len); /* be conservative */

/*
* If we're gonna fail for oversize tuple, do it right away...
* this code should go away eventually.
* If we're gonna fail for oversize tuple, do it right away... this
* code should go away eventually.
*/
if (len > MaxTupleSize)
elog(ERROR, "Tuple is too big: size %u, max size %ld",

@@ -136,8 +136,8 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
lastblock = RelationGetNumberOfBlocks(relation);

/*
* Get the last existing page --- may need to create the first one
* if this is a virgin relation.
* Get the last existing page --- may need to create the first one if
* this is a virgin relation.
*/
if (lastblock == 0)
{

@@ -168,12 +168,14 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)

if (len > PageGetFreeSpace(pageHeader))
{

/*
* BUG: by elog'ing here, we leave the new buffer locked and not
* marked dirty, which may result in an invalid page header
* being left on disk. But we should not get here given the
* test at the top of the routine, and the whole deal should
* go away when we implement tuple splitting anyway...
* BUG: by elog'ing here, we leave the new buffer locked and
* not marked dirty, which may result in an invalid page
* header being left on disk. But we should not get here
* given the test at the top of the routine, and the whole
* deal should go away when we implement tuple splitting
* anyway...
*/
elog(ERROR, "Tuple is too big: size %u", len);
}

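The MAXALIGN() calls above round a length up to the platform's maximum alignment boundary. A minimal standalone sketch of that rounding idiom, assuming an 8-byte alignment purely for illustration (the macro name below is made up; it is not PostgreSQL's actual definition):

#include <stdio.h>
#include <stddef.h>

/* Round len up to the next multiple of 8 (illustrative alignment only). */
#define EXAMPLE_MAXALIGN(len) \
	(((size_t) (len) + 7) & ~(size_t) 7)

int
main(void)
{
	/* a 13-byte length is padded to 16 under 8-byte alignment */
	printf("%zu\n", (size_t) EXAMPLE_MAXALIGN(13));
	return 0;
}
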
@@ -2,13 +2,13 @@
*
* tuptoaster.c
* Support routines for external and compressed storage of
* variable size attributes.
* variable size attributes.
*
* Copyright (c) 2000, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.2 2000/01/20 21:50:59 petere Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.3 2000/04/12 17:14:45 momjian Exp $
*
*
* INTERFACE ROUTINES

@@ -30,17 +30,17 @@
#ifdef TUPLE_TOASTER_ACTIVE

void
heap_tuple_toast_attrs (Relation rel, HeapTuple newtup, HeapTuple oldtup)
heap_tuple_toast_attrs(Relation rel, HeapTuple newtup, HeapTuple oldtup)
{
return;
}


varattrib *
heap_tuple_untoast_attr (varattrib *attr)
varattrib *
heap_tuple_untoast_attr(varattrib * attr)
{
elog(ERROR, "heap_tuple_untoast_attr() called");
}


#endif /* TUPLE_TOASTER_ACTIVE */
#endif /* TUPLE_TOASTER_ACTIVE */

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.24 2000/03/14 23:52:01 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.25 2000/04/12 17:14:47 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into

@@ -62,7 +62,7 @@
*
* At the end of a scan, the AM's endscan routine undoes the locking,
* but does *not* call IndexScanEnd --- the higher-level index_endscan
* routine does that. (We can't do it in the AM because index_endscan
* routine does that. (We can't do it in the AM because index_endscan
* still needs to touch the IndexScanDesc after calling the AM.)
*
* Because of this, the AM does not have a choice whether to call

@@ -114,7 +114,10 @@ RelationGetIndexScan(Relation relation,
ItemPointerSetInvalid(&scan->currentMarkData);
ItemPointerSetInvalid(&scan->nextMarkData);

/* mark cached function lookup data invalid; it will be set on first use */
/*
* mark cached function lookup data invalid; it will be set on first
* use
*/
scan->fn_getnext.fn_oid = InvalidOid;

if (numberOfKeys > 0)

@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.41 2000/03/14 23:52:01 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.42 2000/04/12 17:14:47 momjian Exp $
|
||||
*
|
||||
* INTERFACE ROUTINES
|
||||
* index_open - open an index relation by relationId
|
||||
|
@ -115,10 +115,10 @@
|
|||
* index_open - open an index relation by relationId
|
||||
*
|
||||
* presently the relcache routines do all the work we need
|
||||
* to open/close index relations. However, callers of index_open
|
||||
* to open/close index relations. However, callers of index_open
|
||||
* expect it to succeed, so we need to check for a failure return.
|
||||
*
|
||||
* Note: we acquire no lock on the index. An AccessShareLock is
|
||||
* Note: we acquire no lock on the index. An AccessShareLock is
|
||||
* acquired by index_beginscan (and released by index_endscan).
|
||||
* ----------------
|
||||
*/
|
||||
|
@ -129,7 +129,7 @@ index_open(Oid relationId)
|
|||
|
||||
r = RelationIdGetRelation(relationId);
|
||||
|
||||
if (! RelationIsValid(r))
|
||||
if (!RelationIsValid(r))
|
||||
elog(ERROR, "Index %u does not exist", relationId);
|
||||
|
||||
if (r->rd_rel->relkind != RELKIND_INDEX)
|
||||
|
@ -151,7 +151,7 @@ index_openr(char *relationName)
|
|||
|
||||
r = RelationNameGetRelation(relationName);
|
||||
|
||||
if (! RelationIsValid(r))
|
||||
if (!RelationIsValid(r))
|
||||
elog(ERROR, "Index '%s' does not exist", relationName);
|
||||
|
||||
if (r->rd_rel->relkind != RELKIND_INDEX)
|
||||
|
@ -238,7 +238,7 @@ index_beginscan(Relation relation,
|
|||
* Acquire AccessShareLock for the duration of the scan
|
||||
*
|
||||
* Note: we could get an SI inval message here and consequently have
|
||||
* to rebuild the relcache entry. The refcount increment above
|
||||
* to rebuild the relcache entry. The refcount increment above
|
||||
* ensures that we will rebuild it and not just flush it...
|
||||
* ----------------
|
||||
*/
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.41 2000/02/18 09:29:16 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.42 2000/04/12 17:14:47 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -477,7 +477,7 @@ OperatorRelationFillScanKeyEntry(Relation operatorRelation,
|
|||
{
|
||||
HeapTuple tuple;
|
||||
HeapScanDesc scan = NULL;
|
||||
bool cachesearch = (!IsBootstrapProcessingMode()) && IsCacheInitialized();
|
||||
bool cachesearch = (!IsBootstrapProcessingMode()) && IsCacheInitialized();
|
||||
|
||||
if (cachesearch)
|
||||
{
|
||||
|
@ -547,7 +547,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
|
|||
AttrNumber attributeNumber;
|
||||
int attributeIndex;
|
||||
Oid operatorClassObjectId[INDEX_MAX_KEYS];
|
||||
bool cachesearch = (!IsBootstrapProcessingMode()) && IsCacheInitialized();
|
||||
bool cachesearch = (!IsBootstrapProcessingMode()) && IsCacheInitialized();
|
||||
|
||||
if (cachesearch)
|
||||
{
|
||||
|
@ -674,7 +674,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
|
|||
aform = (Form_pg_amop) GETSTRUCT(tuple);
|
||||
OperatorRelationFillScanKeyEntry(operatorRelation,
|
||||
aform->amopopr,
|
||||
StrategyMapGetScanKeyEntry(map, aform->amopstrategy));
|
||||
StrategyMapGetScanKeyEntry(map, aform->amopstrategy));
|
||||
}
|
||||
|
||||
heap_endscan(scan);
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.33 2000/02/10 19:51:38 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.34 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* These functions are stored in pg_amproc. For each operator class
|
||||
|
@ -35,12 +35,12 @@ btint2cmp(int16 a, int16 b)
|
|||
int32
|
||||
btint4cmp(int32 a, int32 b)
|
||||
{
|
||||
if (a > b)
|
||||
return 1;
|
||||
else if (a == b)
|
||||
return 0;
|
||||
else
|
||||
return -1;
|
||||
if (a > b)
|
||||
return 1;
|
||||
else if (a == b)
|
||||
return 0;
|
||||
else
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32
|
||||
|
|
|
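The btint4cmp() hunk above is a plain three-way comparison (positive, zero, or negative result). That is the same shape qsort-style interfaces expect; a standalone sketch for illustration only, independent of the btree code itself:

#include <stdio.h>
#include <stdlib.h>

/* Three-way comparison of two ints, same shape as the comparator above. */
static int
cmp_int(const void *pa, const void *pb)
{
	int	a = *(const int *) pa;
	int	b = *(const int *) pb;

	if (a > b)
		return 1;
	else if (a == b)
		return 0;
	else
		return -1;
}

int
main(void)
{
	int	v[] = {42, -7, 0, 13};
	int	i;

	qsort(v, 4, sizeof(int), cmp_int);
	for (i = 0; i < 4; i++)
		printf("%d ", v[i]);	/* prints: -7 0 13 42 */
	printf("\n");
	return 0;
}
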
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.56 2000/03/17 02:36:03 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.57 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -21,10 +21,10 @@
|
|||
|
||||
static InsertIndexResult _bt_insertonpg(Relation rel, Buffer buf, BTStack stack, int keysz, ScanKey scankey, BTItem btitem, BTItem afteritem);
|
||||
static Buffer _bt_split(Relation rel, Size keysz, ScanKey scankey,
|
||||
Buffer buf, OffsetNumber firstright);
|
||||
Buffer buf, OffsetNumber firstright);
|
||||
static OffsetNumber _bt_findsplitloc(Relation rel, Size keysz, ScanKey scankey,
|
||||
Page page, OffsetNumber start,
|
||||
OffsetNumber maxoff, Size llimit);
|
||||
Page page, OffsetNumber start,
|
||||
OffsetNumber maxoff, Size llimit);
|
||||
static void _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf);
|
||||
static OffsetNumber _bt_pgaddtup(Relation rel, Buffer buf, int keysz, ScanKey itup_scankey, Size itemsize, BTItem btitem, BTItem afteritem);
|
||||
static bool _bt_goesonpg(Relation rel, Buffer buf, Size keysz, ScanKey scankey, BTItem afteritem);
|
||||
|
@ -267,21 +267,20 @@ _bt_insertonpg(Relation rel,
|
|||
itemsz = IndexTupleDSize(btitem->bti_itup)
|
||||
+ (sizeof(BTItemData) - sizeof(IndexTupleData));
|
||||
|
||||
itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do
|
||||
* this but we need to be
|
||||
* consistent */
|
||||
itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but
|
||||
* we need to be consistent */
|
||||
|
||||
/*
|
||||
* Check whether the item can fit on a btree page at all.
|
||||
* (Eventually, we ought to try to apply TOAST methods if not.)
|
||||
* We actually need to be able to fit three items on every page,
|
||||
* so restrict any one item to 1/3 the per-page available space.
|
||||
* Note that at this point, itemsz doesn't include the ItemId.
|
||||
* Check whether the item can fit on a btree page at all. (Eventually,
|
||||
* we ought to try to apply TOAST methods if not.) We actually need to
|
||||
* be able to fit three items on every page, so restrict any one item
|
||||
* to 1/3 the per-page available space. Note that at this point,
|
||||
* itemsz doesn't include the ItemId.
|
||||
*/
|
||||
if (itemsz > (PageGetPageSize(page)-sizeof(PageHeaderData)-MAXALIGN(sizeof(BTPageOpaqueData)))/3 - sizeof(ItemIdData))
|
||||
if (itemsz > (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
|
||||
elog(ERROR, "btree: index item size %u exceeds maximum %lu",
|
||||
itemsz,
|
||||
(PageGetPageSize(page)-sizeof(PageHeaderData)-MAXALIGN(sizeof(BTPageOpaqueData)))/3 - sizeof(ItemIdData));
|
||||
(PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
|
||||
|
||||
/*
|
||||
* If we have to insert item on the leftmost page which is the first
|
||||
|
@ -415,8 +414,8 @@ _bt_insertonpg(Relation rel,
|
|||
bool is_root = lpageop->btpo_flags & BTP_ROOT;
|
||||
|
||||
/*
|
||||
* Instead of splitting leaf page in the chain of duplicates
|
||||
* by new duplicate, insert it into some right page.
|
||||
* Instead of splitting leaf page in the chain of duplicates by
|
||||
* new duplicate, insert it into some right page.
|
||||
*/
|
||||
if ((lpageop->btpo_flags & BTP_CHAIN) &&
|
||||
(lpageop->btpo_flags & BTP_LEAF) && keys_equal)
|
||||
|
@ -424,8 +423,9 @@ _bt_insertonpg(Relation rel,
|
|||
rbuf = _bt_getbuf(rel, lpageop->btpo_next, BT_WRITE);
|
||||
rpage = BufferGetPage(rbuf);
|
||||
rpageop = (BTPageOpaque) PageGetSpecialPointer(rpage);
|
||||
/*
|
||||
* some checks
|
||||
|
||||
/*
|
||||
* some checks
|
||||
*/
|
||||
if (!P_RIGHTMOST(rpageop)) /* non-rightmost page */
|
||||
{ /* If we have the same hikey here then
|
||||
|
@ -442,6 +442,7 @@ _bt_insertonpg(Relation rel,
|
|||
BTGreaterStrategyNumber))
|
||||
elog(FATAL, "btree: hikey is out of order");
|
||||
else if (rpageop->btpo_flags & BTP_CHAIN)
|
||||
|
||||
/*
|
||||
* If hikey > scankey then it's last page in chain and
|
||||
* BTP_CHAIN must be OFF
|
||||
|
@ -450,9 +451,7 @@ _bt_insertonpg(Relation rel,
|
|||
}
|
||||
else
|
||||
/* rightmost page */
|
||||
{
|
||||
Assert(!(rpageop->btpo_flags & BTP_CHAIN));
|
||||
}
|
||||
_bt_relbuf(rel, buf, BT_WRITE);
|
||||
return (_bt_insertonpg(rel, rbuf, stack, keysz,
|
||||
scankey, btitem, afteritem));
|
||||
|
@ -708,7 +707,7 @@ l_spl: ;
|
|||
*/
|
||||
if (!parent_chained &&
|
||||
MAXALIGN(IndexTupleDSize(lowLeftItem->bti_itup)) ==
|
||||
MAXALIGN(IndexTupleDSize(stack->bts_btitem->bti_itup)))
|
||||
MAXALIGN(IndexTupleDSize(stack->bts_btitem->bti_itup)))
|
||||
{
|
||||
_bt_updateitem(rel, keysz, pbuf,
|
||||
stack->bts_btitem, lowLeftItem);
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.35 2000/01/26 05:55:58 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.36 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* Postgres btree pages look like ordinary relation pages. The opaque
|
||||
|
@ -257,7 +257,7 @@ _bt_getroot(Relation rel, int access)
|
|||
else
|
||||
{
|
||||
rootblkno = metad->btm_root;
|
||||
_bt_relbuf(rel, metabuf, BT_READ); /* done with the meta page */
|
||||
_bt_relbuf(rel, metabuf, BT_READ); /* done with the meta page */
|
||||
|
||||
rootbuf = _bt_getbuf(rel, rootblkno, access);
|
||||
}
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.53 2000/02/18 09:29:54 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.54 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -77,7 +77,7 @@ btbuild(Relation heap,
|
|||
#endif
|
||||
Node *pred,
|
||||
*oldPred;
|
||||
BTSpool *spool = NULL;
|
||||
BTSpool *spool = NULL;
|
||||
bool isunique;
|
||||
bool usefast;
|
||||
|
||||
|
@ -185,7 +185,7 @@ btbuild(Relation heap,
|
|||
#ifndef OMIT_PARTIAL_INDEX
|
||||
/* SetSlotContents(slot, htup); */
|
||||
slot->val = htup;
|
||||
if (! ExecQual((List *) pred, econtext, false))
|
||||
if (!ExecQual((List *) pred, econtext, false))
|
||||
continue;
|
||||
#endif /* OMIT_PARTIAL_INDEX */
|
||||
}
|
||||
|
@ -276,9 +276,9 @@ btbuild(Relation heap,
|
|||
}
|
||||
|
||||
/*
|
||||
* if we are doing bottom-up btree build, finish the build by
|
||||
* (1) completing the sort of the spool file, (2) inserting the
|
||||
* sorted tuples into btree pages and (3) building the upper levels.
|
||||
* if we are doing bottom-up btree build, finish the build by (1)
|
||||
* completing the sort of the spool file, (2) inserting the sorted
|
||||
* tuples into btree pages and (3) building the upper levels.
|
||||
*/
|
||||
if (usefast)
|
||||
{
|
||||
|
@ -298,26 +298,27 @@ btbuild(Relation heap,
|
|||
/*
|
||||
* Since we just counted the tuples in the heap, we update its stats
|
||||
* in pg_class to guarantee that the planner takes advantage of the
|
||||
* index we just created. But, only update statistics during
|
||||
* normal index definitions, not for indices on system catalogs
|
||||
* created during bootstrap processing. We must close the relations
|
||||
* before updating statistics to guarantee that the relcache entries
|
||||
* are flushed when we increment the command counter in UpdateStats().
|
||||
* But we do not release any locks on the relations; those will be
|
||||
* held until end of transaction.
|
||||
* index we just created. But, only update statistics during normal
|
||||
* index definitions, not for indices on system catalogs created
|
||||
* during bootstrap processing. We must close the relations before
|
||||
* updating statistics to guarantee that the relcache entries are
|
||||
* flushed when we increment the command counter in UpdateStats(). But
|
||||
* we do not release any locks on the relations; those will be held
|
||||
* until end of transaction.
|
||||
*/
|
||||
if (IsNormalProcessingMode())
|
||||
{
|
||||
Oid hrelid = RelationGetRelid(heap);
|
||||
Oid irelid = RelationGetRelid(index);
|
||||
Oid hrelid = RelationGetRelid(heap);
|
||||
Oid irelid = RelationGetRelid(index);
|
||||
bool inplace = IsReindexProcessing();
|
||||
|
||||
heap_close(heap, NoLock);
|
||||
index_close(index);
|
||||
|
||||
/*
|
||||
UpdateStats(hrelid, nhtups, true);
|
||||
UpdateStats(irelid, nitups, false);
|
||||
*/
|
||||
* UpdateStats(hrelid, nhtups, true); UpdateStats(irelid, nitups,
|
||||
* false);
|
||||
*/
|
||||
UpdateStats(hrelid, nhtups, inplace);
|
||||
UpdateStats(irelid, nitups, inplace);
|
||||
if (oldPred != NULL)
|
||||
|
@ -623,7 +624,7 @@ _bt_restscan(IndexScanDesc scan)
|
|||
BTItem item;
|
||||
BlockNumber blkno;
|
||||
|
||||
LockBuffer(buf, BT_READ); /* lock buffer first! */
|
||||
LockBuffer(buf, BT_READ); /* lock buffer first! */
|
||||
page = BufferGetPage(buf);
|
||||
maxoff = PageGetMaxOffsetNumber(page);
|
||||
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.30 2000/01/26 05:55:58 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.31 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
*
|
||||
* NOTES
|
||||
|
@ -52,13 +52,16 @@ static void _bt_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offn
|
|||
void
|
||||
AtEOXact_nbtree(void)
|
||||
{
|
||||
/* Note: these actions should only be necessary during xact abort;
|
||||
* but they can't hurt during a commit.
|
||||
|
||||
/*
|
||||
* Note: these actions should only be necessary during xact abort; but
|
||||
* they can't hurt during a commit.
|
||||
*/
|
||||
|
||||
/* Reset the active-scans list to empty.
|
||||
* We do not need to free the list elements, because they're all
|
||||
* palloc()'d, so they'll go away at end of transaction anyway.
|
||||
/*
|
||||
* Reset the active-scans list to empty. We do not need to free the
|
||||
* list elements, because they're all palloc()'d, so they'll go away
|
||||
* at end of transaction anyway.
|
||||
*/
|
||||
BTScans = NULL;
|
||||
|
||||
|
@ -96,7 +99,7 @@ _bt_dropscan(IndexScanDesc scan)
|
|||
last = chk;
|
||||
|
||||
if (chk == (BTScanList) NULL)
|
||||
elog(ERROR, "btree scan list trashed; can't find 0x%p", (void*)scan);
|
||||
elog(ERROR, "btree scan list trashed; can't find 0x%p", (void *) scan);
|
||||
|
||||
if (last == (BTScanList) NULL)
|
||||
BTScans = chk->btsl_next;
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.58 2000/03/17 02:36:04 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.59 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -299,9 +299,7 @@ _bt_skeycmp(Relation rel,
|
|||
compare = -1; /* not-NULL key "<" NULL datum */
|
||||
}
|
||||
else
|
||||
{
|
||||
compare = (int32) FMGR_PTR2(&entry->sk_func, keyDatum, attrDatum);
|
||||
}
|
||||
|
||||
if (compare != 0)
|
||||
break; /* done when we find unequal attributes */
|
||||
|
@ -368,26 +366,26 @@ _bt_binsrch(Relation rel,
|
|||
|
||||
/*
|
||||
* If there are no keys on the page, return the first available slot.
|
||||
* Note this covers two cases: the page is really empty (no keys),
|
||||
* or it contains only a high key. The latter case is possible after
|
||||
* Note this covers two cases: the page is really empty (no keys), or
|
||||
* it contains only a high key. The latter case is possible after
|
||||
* vacuuming.
|
||||
*/
|
||||
if (high < low)
|
||||
return low;
|
||||
|
||||
/*
|
||||
* Binary search to find the first key on the page >= scan key.
|
||||
* Loop invariant: all slots before 'low' are < scan key, all slots
|
||||
* at or after 'high' are >= scan key. Also, haveEq is true if the
|
||||
* tuple at 'high' is == scan key.
|
||||
* We can fall out when high == low.
|
||||
* Binary search to find the first key on the page >= scan key. Loop
|
||||
* invariant: all slots before 'low' are < scan key, all slots at or
|
||||
* after 'high' are >= scan key. Also, haveEq is true if the tuple at
|
||||
* 'high' is == scan key. We can fall out when high == low.
|
||||
*/
|
||||
high++; /* establish the loop invariant for high */
|
||||
haveEq = false;
|
||||
|
||||
while (high > low)
|
||||
{
|
||||
OffsetNumber mid = low + ((high - low) / 2);
|
||||
OffsetNumber mid = low + ((high - low) / 2);
|
||||
|
||||
/* We have low <= mid < high, so mid points at a real slot */
|
||||
|
||||
result = _bt_compare(rel, itupdesc, page, keysz, scankey, mid);
|
||||
|
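The reflowed comment above describes a lower-bound style invariant: every slot before 'low' compares less than the scan key, and every slot at or after 'high' compares greater than or equal to it. A minimal sketch of that invariant over a plain integer array (illustrative only; the names are invented and this is not the backend's _bt_binsrch):

#include <stdio.h>

/* Return the first index whose value is >= key; n if there is none. */
static int
first_ge(const int *a, int n, int key)
{
	int	low = 0;
	int	high = n;			/* invariant: a[0..low-1] < key, a[high..n-1] >= key */

	while (high > low)
	{
		int	mid = low + (high - low) / 2;

		if (a[mid] < key)
			low = mid + 1;	/* a[mid] < key, so the answer lies to the right */
		else
			high = mid;		/* a[mid] >= key, so mid may be the answer */
	}
	return low;				/* low == high: boundary between < and >= */
}

int
main(void)
{
	int	a[] = {1, 3, 3, 5, 9};

	printf("%d\n", first_ge(a, 5, 3));	/* prints 1 */
	return 0;
}
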
@ -403,7 +401,7 @@ _bt_binsrch(Relation rel,
|
|||
|
||||
/*--------------------
|
||||
* At this point we have high == low, but be careful: they could point
|
||||
* past the last slot on the page. We also know that haveEq is true
|
||||
* past the last slot on the page. We also know that haveEq is true
|
||||
* if and only if there is an equal key (in which case high&low point
|
||||
* at the first equal key).
|
||||
*
|
||||
|
@ -443,18 +441,20 @@ _bt_binsrch(Relation rel,
|
|||
|
||||
if (haveEq)
|
||||
{
|
||||
|
||||
/*
|
||||
* There is an equal key. We return either the first equal key
|
||||
* (which we just found), or the last lesser key.
|
||||
*
|
||||
* We need not check srchtype != BT_DESCENT here, since if that
|
||||
* is true then natts == keysz by assumption.
|
||||
* We need not check srchtype != BT_DESCENT here, since if that is
|
||||
* true then natts == keysz by assumption.
|
||||
*/
|
||||
if (natts == keysz)
|
||||
return low; /* return first equal key */
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
/*
|
||||
* There is no equal key. We return either the first greater key
|
||||
* (which we just found), or the last lesser key.
|
||||
|
@ -524,6 +524,7 @@ _bt_compare(Relation rel,
|
|||
&& P_LEFTMOST(opaque)
|
||||
&& offnum == P_HIKEY)
|
||||
{
|
||||
|
||||
/*
|
||||
* we just have to believe that this will only be called with
|
||||
* offnum == P_HIKEY when P_HIKEY is the OffsetNumber of the first
|
||||
|
@ -702,11 +703,12 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
|
|||
|
||||
bool strategyCheck;
|
||||
ScanKey scankeys = 0;
|
||||
int keysCount = 0;
|
||||
int *nKeyIs = 0;
|
||||
int i, j;
|
||||
StrategyNumber strat_total;
|
||||
|
||||
int keysCount = 0;
|
||||
int *nKeyIs = 0;
|
||||
int i,
|
||||
j;
|
||||
StrategyNumber strat_total;
|
||||
|
||||
rel = scan->relation;
|
||||
so = (BTScanOpaque) scan->opaque;
|
||||
|
||||
|
@ -723,15 +725,15 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
|
|||
_bt_orderkeys(rel, so);
|
||||
|
||||
if (so->qual_ok)
|
||||
strategyCheck = true;
|
||||
}
|
||||
strategyCheck = true;
|
||||
}
|
||||
strat_total = BTEqualStrategyNumber;
|
||||
if (strategyCheck)
|
||||
{
|
||||
AttrNumber attno;
|
||||
|
||||
nKeyIs = (int *)palloc(so->numberOfKeys*sizeof(int));
|
||||
for (i=0; i < so->numberOfKeys; i++)
|
||||
nKeyIs = (int *) palloc(so->numberOfKeys * sizeof(int));
|
||||
for (i = 0; i < so->numberOfKeys; i++)
|
||||
{
|
||||
attno = so->keyData[i].sk_attno;
|
||||
if (attno == keysCount)
|
||||
|
@ -739,16 +741,16 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
|
|||
if (attno > keysCount + 1)
|
||||
break;
|
||||
strat = _bt_getstrat(rel, attno,
|
||||
so->keyData[i].sk_procedure);
|
||||
so->keyData[i].sk_procedure);
|
||||
if (strat == strat_total ||
|
||||
strat == BTEqualStrategyNumber)
|
||||
strat == BTEqualStrategyNumber)
|
||||
{
|
||||
nKeyIs[keysCount++] = i;
|
||||
continue;
|
||||
}
|
||||
if (ScanDirectionIsBackward(dir) &&
|
||||
(strat == BTLessStrategyNumber ||
|
||||
strat == BTLessEqualStrategyNumber) )
|
||||
(strat == BTLessStrategyNumber ||
|
||||
strat == BTLessEqualStrategyNumber))
|
||||
{
|
||||
nKeyIs[keysCount++] = i;
|
||||
strat_total = strat;
|
||||
|
@ -757,8 +759,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
|
|||
continue;
|
||||
}
|
||||
if (ScanDirectionIsForward(dir) &&
|
||||
(strat == BTGreaterStrategyNumber ||
|
||||
strat == BTGreaterEqualStrategyNumber) )
|
||||
(strat == BTGreaterStrategyNumber ||
|
||||
strat == BTGreaterEqualStrategyNumber))
|
||||
{
|
||||
nKeyIs[keysCount++] = i;
|
||||
strat_total = strat;
|
||||
|
@ -794,8 +796,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
|
|||
* at the right place in the scan.
|
||||
*/
|
||||
/* _bt_orderkeys disallows it, but it's place to add some code latter */
|
||||
scankeys = (ScanKey)palloc(keysCount*sizeof(ScanKeyData));
|
||||
for (i=0; i < keysCount; i++)
|
||||
scankeys = (ScanKey) palloc(keysCount * sizeof(ScanKeyData));
|
||||
for (i = 0; i < keysCount; i++)
|
||||
{
|
||||
j = nKeyIs[i];
|
||||
if (so->keyData[j].sk_flags & SK_ISNULL)
|
||||
|
@ -804,12 +806,13 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
|
|||
pfree(scankeys);
|
||||
elog(ERROR, "_bt_first: btree doesn't support is(not)null, yet");
|
||||
return ((RetrieveIndexResult) NULL);
|
||||
}
|
||||
proc = index_getprocid(rel, i+1, BTORDER_PROC);
|
||||
ScanKeyEntryInitialize(scankeys+i, so->keyData[j].sk_flags,
|
||||
i+1, proc, so->keyData[j].sk_argument);
|
||||
}
|
||||
proc = index_getprocid(rel, i + 1, BTORDER_PROC);
|
||||
ScanKeyEntryInitialize(scankeys + i, so->keyData[j].sk_flags,
|
||||
i + 1, proc, so->keyData[j].sk_argument);
|
||||
}
|
||||
if (nKeyIs) pfree(nKeyIs);
|
||||
if (nKeyIs)
|
||||
pfree(nKeyIs);
|
||||
|
||||
stack = _bt_search(rel, keysCount, scankeys, &buf);
|
||||
_bt_freestack(stack);
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*
|
||||
* We use tuplesort.c to sort the given index tuples into order.
|
||||
* Then we scan the index tuples in order and build the btree pages
|
||||
* for each level. When we have only one page on a level, it must be the
|
||||
* for each level. When we have only one page on a level, it must be the
|
||||
* root -- it can be attached to the btree metapage and we are done.
|
||||
*
|
||||
* this code is moderately slow (~10% slower) compared to the regular
|
||||
|
@ -28,7 +28,7 @@
|
|||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.51 2000/02/18 06:32:39 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.52 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -70,12 +70,12 @@ struct BTSpool
|
|||
|
||||
static void _bt_load(Relation index, BTSpool *btspool);
|
||||
static BTItem _bt_buildadd(Relation index, Size keysz, ScanKey scankey,
|
||||
BTPageState *state, BTItem bti, int flags);
|
||||
BTPageState *state, BTItem bti, int flags);
|
||||
static BTItem _bt_minitem(Page opage, BlockNumber oblkno, int atend);
|
||||
static BTPageState *_bt_pagestate(Relation index, int flags,
|
||||
int level, bool doupper);
|
||||
int level, bool doupper);
|
||||
static void _bt_uppershutdown(Relation index, Size keysz, ScanKey scankey,
|
||||
BTPageState *state);
|
||||
BTPageState *state);
|
||||
|
||||
|
||||
/*
|
||||
|
@ -86,7 +86,7 @@ static void _bt_uppershutdown(Relation index, Size keysz, ScanKey scankey,
|
|||
/*
|
||||
* create and initialize a spool structure
|
||||
*/
|
||||
BTSpool *
|
||||
BTSpool *
|
||||
_bt_spoolinit(Relation index, bool isunique)
|
||||
{
|
||||
BTSpool *btspool = (BTSpool *) palloc(sizeof(BTSpool));
|
||||
|
@ -99,9 +99,9 @@ _bt_spoolinit(Relation index, bool isunique)
|
|||
btspool->sortstate = tuplesort_begin_index(index, isunique, false);
|
||||
|
||||
/*
|
||||
* Currently, tuplesort provides sort functions on IndexTuples.
|
||||
* If we kept anything in a BTItem other than a regular IndexTuple,
|
||||
* we'd need to modify tuplesort to understand BTItems as such.
|
||||
* Currently, tuplesort provides sort functions on IndexTuples. If we
|
||||
* kept anything in a BTItem other than a regular IndexTuple, we'd
|
||||
* need to modify tuplesort to understand BTItems as such.
|
||||
*/
|
||||
Assert(sizeof(BTItemData) == sizeof(IndexTupleData));
|
||||
|
||||
|
@ -306,20 +306,20 @@ _bt_buildadd(Relation index, Size keysz, ScanKey scankey,
|
|||
btisz = MAXALIGN(btisz);
|
||||
|
||||
/*
|
||||
* Check whether the item can fit on a btree page at all.
|
||||
* (Eventually, we ought to try to apply TOAST methods if not.)
|
||||
* We actually need to be able to fit three items on every page,
|
||||
* so restrict any one item to 1/3 the per-page available space.
|
||||
* Note that at this point, btisz doesn't include the ItemId.
|
||||
* Check whether the item can fit on a btree page at all. (Eventually,
|
||||
* we ought to try to apply TOAST methods if not.) We actually need to
|
||||
* be able to fit three items on every page, so restrict any one item
|
||||
* to 1/3 the per-page available space. Note that at this point, btisz
|
||||
* doesn't include the ItemId.
|
||||
*
|
||||
* NOTE: similar code appears in _bt_insertonpg() to defend against
|
||||
* oversize items being inserted into an already-existing index.
|
||||
* But during creation of an index, we don't go through there.
|
||||
* oversize items being inserted into an already-existing index. But
|
||||
* during creation of an index, we don't go through there.
|
||||
*/
|
||||
if (btisz > (PageGetPageSize(npage)-sizeof(PageHeaderData)-MAXALIGN(sizeof(BTPageOpaqueData)))/3 - sizeof(ItemIdData))
|
||||
if (btisz > (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
|
||||
elog(ERROR, "btree: index item size %d exceeds maximum %ld",
|
||||
btisz,
|
||||
(PageGetPageSize(npage)-sizeof(PageHeaderData)-MAXALIGN(sizeof(BTPageOpaqueData)))/3 - sizeof(ItemIdData));
|
||||
(PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
|
||||
|
||||
if (pgspc < btisz)
|
||||
{
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.35 2000/02/18 06:32:39 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.36 2000/04/12 17:14:50 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -141,7 +141,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
|
|||
uint16 numberOfKeys = so->numberOfKeys;
|
||||
uint16 new_numberOfKeys = 0;
|
||||
AttrNumber attno = 1;
|
||||
bool equalStrategyEnd, underEqualStrategy;
|
||||
bool equalStrategyEnd,
|
||||
underEqualStrategy;
|
||||
|
||||
if (numberOfKeys < 1)
|
||||
return;
|
||||
|
@ -194,6 +195,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
|
|||
elog(ERROR, "_bt_orderkeys: key(s) for attribute %d missed", attno + 1);
|
||||
|
||||
underEqualStrategy = (!equalStrategyEnd);
|
||||
|
||||
/*
|
||||
* If = has been specified, no other key will be used. In case
|
||||
* of key < 2 && key == 1 and so on we have to set qual_ok to
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.44 2000/03/01 05:39:23 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.45 2000/04/12 17:14:51 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -181,7 +181,7 @@ rtbuild(Relation heap,
|
|||
#ifndef OMIT_PARTIAL_INDEX
|
||||
/* SetSlotContents(slot, htup); */
|
||||
slot->val = htup;
|
||||
if (! ExecQual((List *) pred, econtext, false))
|
||||
if (!ExecQual((List *) pred, econtext, false))
|
||||
continue;
|
||||
#endif /* OMIT_PARTIAL_INDEX */
|
||||
}
|
||||
|
@ -249,18 +249,18 @@ rtbuild(Relation heap,
|
|||
/*
|
||||
* Since we just counted the tuples in the heap, we update its stats
|
||||
* in pg_class to guarantee that the planner takes advantage of the
|
||||
* index we just created. But, only update statistics during
|
||||
* normal index definitions, not for indices on system catalogs
|
||||
* created during bootstrap processing. We must close the relations
|
||||
* before updating statistics to guarantee that the relcache entries
|
||||
* are flushed when we increment the command counter in UpdateStats().
|
||||
* But we do not release any locks on the relations; those will be
|
||||
* held until end of transaction.
|
||||
* index we just created. But, only update statistics during normal
|
||||
* index definitions, not for indices on system catalogs created
|
||||
* during bootstrap processing. We must close the relations before
|
||||
* updating statistics to guarantee that the relcache entries are
|
||||
* flushed when we increment the command counter in UpdateStats(). But
|
||||
* we do not release any locks on the relations; those will be held
|
||||
* until end of transaction.
|
||||
*/
|
||||
if (IsNormalProcessingMode())
|
||||
{
|
||||
Oid hrelid = RelationGetRelid(heap);
|
||||
Oid irelid = RelationGetRelid(index);
|
||||
Oid hrelid = RelationGetRelid(heap);
|
||||
Oid irelid = RelationGetRelid(index);
|
||||
bool inplace = IsReindexProcessing();
|
||||
|
||||
heap_close(heap, NoLock);
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.31 2000/01/26 05:56:00 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.32 2000/04/12 17:14:51 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -268,7 +268,7 @@ rtdropscan(IndexScanDesc s)
|
|||
prev = l;
|
||||
|
||||
if (l == (RTScanList) NULL)
|
||||
elog(ERROR, "rtree scan list corrupted -- cannot find 0x%p", (void*)s);
|
||||
elog(ERROR, "rtree scan list corrupted -- cannot find 0x%p", (void *) s);
|
||||
|
||||
if (prev == (RTScanList) NULL)
|
||||
RTScans = l->rtsl_next;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#include "postgres.h"
|
||||
#include "access/rmgr.h"
|
||||
|
||||
RmgrData *RmgrTable = NULL;
|
||||
RmgrData *RmgrTable = NULL;
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.33 2000/01/26 05:56:03 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.34 2000/04/12 17:14:52 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* This file contains the high level access-method interface to the
|
||||
|
@ -162,6 +162,7 @@ TransactionLogTest(TransactionId transactionId, /* transaction id to test */
|
|||
|
||||
if (!fail)
|
||||
{
|
||||
|
||||
/*
|
||||
* DO NOT cache status for transactions in unknown state !!!
|
||||
*/
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.27 2000/03/31 02:43:31 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.28 2000/04/12 17:14:53 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -331,8 +331,8 @@ ReadNewTransactionId(TransactionId *xid)
|
|||
SpinAcquire(OidGenLockId); /* not good for concurrency... */
|
||||
|
||||
/*
|
||||
* Note that we don't check is ShmemVariableCache->xid_count equal
|
||||
* to 0 or not. This will work as long as we don't call
|
||||
* Note that we don't check is ShmemVariableCache->xid_count equal to
|
||||
* 0 or not. This will work as long as we don't call
|
||||
* ReadNewTransactionId() before GetNewTransactionId().
|
||||
*/
|
||||
if (ShmemVariableCache->nextXid == 0)
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.63 2000/04/09 04:43:16 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.64 2000/04/12 17:14:53 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* Transaction aborts can now occur two ways:
|
||||
|
@ -160,7 +160,7 @@
|
|||
#include "utils/portal.h"
|
||||
#include "utils/relcache.h"
|
||||
|
||||
extern bool SharedBufferChanged;
|
||||
extern bool SharedBufferChanged;
|
||||
|
||||
static void AbortTransaction(void);
|
||||
static void AtAbort_Cache(void);
|
||||
|
@ -517,8 +517,8 @@ CommandCounterIncrement()
|
|||
CurrentTransactionStateData.scanCommandId = CurrentTransactionStateData.commandId;
|
||||
|
||||
/*
|
||||
* make cache changes visible to me. AtCommit_LocalCache()
|
||||
* instead of AtCommit_Cache() is called here.
|
||||
* make cache changes visible to me. AtCommit_LocalCache() instead of
|
||||
* AtCommit_Cache() is called here.
|
||||
*/
|
||||
AtCommit_LocalCache();
|
||||
AtStart_Cache();
|
||||
|
@ -627,16 +627,15 @@ RecordTransactionCommit()
|
|||
*/
|
||||
xid = GetCurrentTransactionId();
|
||||
|
||||
/*
|
||||
* flush the buffer manager pages. Note: if we have stable
|
||||
* main memory, dirty shared buffers are not flushed
|
||||
* plai 8/7/90
|
||||
/*
|
||||
* flush the buffer manager pages. Note: if we have stable main
|
||||
* memory, dirty shared buffers are not flushed plai 8/7/90
|
||||
*/
|
||||
leak = BufferPoolCheckLeak();
|
||||
|
||||
/*
|
||||
* If no one shared buffer was changed by this transaction then
|
||||
* we don't flush shared buffers and don't record commit status.
|
||||
* If no one shared buffer was changed by this transaction then we
|
||||
* don't flush shared buffers and don't record commit status.
|
||||
*/
|
||||
if (SharedBufferChanged)
|
||||
{
|
||||
|
@ -645,13 +644,13 @@ RecordTransactionCommit()
|
|||
ResetBufferPool(true);
|
||||
|
||||
/*
|
||||
* have the transaction access methods record the status
|
||||
* of this transaction id in the pg_log relation.
|
||||
* have the transaction access methods record the status of this
|
||||
* transaction id in the pg_log relation.
|
||||
*/
|
||||
TransactionIdCommit(xid);
|
||||
|
||||
/*
|
||||
* Now write the log info to the disk too.
|
||||
* Now write the log info to the disk too.
|
||||
*/
|
||||
leak = BufferPoolCheckLeak();
|
||||
FlushBufferPool();
|
||||
|
@ -751,10 +750,10 @@ RecordTransactionAbort()
|
|||
*/
|
||||
xid = GetCurrentTransactionId();
|
||||
|
||||
/*
|
||||
* Have the transaction access methods record the status of
|
||||
* this transaction id in the pg_log relation. We skip it
|
||||
* if no one shared buffer was changed by this transaction.
|
||||
/*
|
||||
* Have the transaction access methods record the status of this
|
||||
* transaction id in the pg_log relation. We skip it if no one shared
|
||||
* buffer was changed by this transaction.
|
||||
*/
|
||||
if (SharedBufferChanged && !TransactionIdDidCommit(xid))
|
||||
TransactionIdAbort(xid);
|
||||
|
@ -936,7 +935,7 @@ CommitTransaction()
|
|||
/* ----------------
|
||||
* Tell the trigger manager that this transaction is about to be
|
||||
* committed. He'll invoke all trigger deferred until XACT before
|
||||
* we really start on committing the transaction.
|
||||
* we really start on committing the transaction.
|
||||
* ----------------
|
||||
*/
|
||||
DeferredTriggerEndXact();
|
||||
|
@ -965,13 +964,13 @@ CommitTransaction()
|
|||
RecordTransactionCommit();
|
||||
|
||||
/*
|
||||
* Let others know about no transaction in progress by me.
|
||||
* Note that this must be done _before_ releasing locks we hold
|
||||
* and SpinAcquire(SInvalLock) is required: UPDATE with xid 0 is
|
||||
* blocked by xid 1' UPDATE, xid 1 is doing commit while xid 2
|
||||
* gets snapshot - if xid 2' GetSnapshotData sees xid 1 as running
|
||||
* then it must see xid 0 as running as well or it will see two
|
||||
* tuple versions - one deleted by xid 1 and one inserted by xid 0.
|
||||
* Let others know about no transaction in progress by me. Note that
|
||||
* this must be done _before_ releasing locks we hold and
|
||||
* SpinAcquire(SInvalLock) is required: UPDATE with xid 0 is blocked
|
||||
* by xid 1' UPDATE, xid 1 is doing commit while xid 2 gets snapshot -
|
||||
* if xid 2' GetSnapshotData sees xid 1 as running then it must see
|
||||
* xid 0 as running as well or it will see two tuple versions - one
|
||||
* deleted by xid 1 and one inserted by xid 0.
|
||||
*/
|
||||
if (MyProc != (PROC *) NULL)
|
||||
{
|
||||
|
@ -995,7 +994,7 @@ CommitTransaction()
|
|||
* ----------------
|
||||
*/
|
||||
s->state = TRANS_DEFAULT;
|
||||
SharedBufferChanged = false; /* safest place to do it */
|
||||
SharedBufferChanged = false;/* safest place to do it */
|
||||
|
||||
}
|
||||
|
||||
|
@ -1031,7 +1030,7 @@ AbortTransaction()
|
|||
|
||||
/* ----------------
|
||||
* Tell the trigger manager that this transaction is about to be
|
||||
* aborted.
|
||||
* aborted.
|
||||
* ----------------
|
||||
*/
|
||||
DeferredTriggerAbortXact();
|
||||
|
@ -1070,7 +1069,7 @@ AbortTransaction()
|
|||
* ----------------
|
||||
*/
|
||||
s->state = TRANS_DEFAULT;
|
||||
SharedBufferChanged = false; /* safest place to do it */
|
||||
SharedBufferChanged = false;/* safest place to do it */
|
||||
}
|
||||
|
||||
/* --------------------------------
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.80 2000/02/18 09:28:39 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.81 2000/04/12 17:14:54 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -39,13 +39,13 @@
|
|||
|
||||
#define ALLOC(t, c) (t *)calloc((unsigned)(c), sizeof(t))
|
||||
|
||||
extern void BaseInit(void);
|
||||
extern void StartupXLOG(void);
|
||||
extern void ShutdownXLOG(void);
|
||||
extern void BootStrapXLOG(void);
|
||||
extern void BaseInit(void);
|
||||
extern void StartupXLOG(void);
|
||||
extern void ShutdownXLOG(void);
|
||||
extern void BootStrapXLOG(void);
|
||||
|
||||
extern char XLogDir[];
|
||||
extern char ControlFilePath[];
|
||||
extern char XLogDir[];
|
||||
extern char ControlFilePath[];
|
||||
|
||||
extern int Int_yyparse(void);
|
||||
static hashnode *AddStr(char *str, int strlength, int mderef);
|
||||
|
@ -107,7 +107,7 @@ static struct typinfo Procid[] = {
|
|||
{"char", CHAROID, 0, 1, F_CHARIN, F_CHAROUT},
|
||||
{"name", NAMEOID, 0, NAMEDATALEN, F_NAMEIN, F_NAMEOUT},
|
||||
{"int2", INT2OID, 0, 2, F_INT2IN, F_INT2OUT},
|
||||
{"int2vector", INT2VECTOROID, 0, INDEX_MAX_KEYS*2, F_INT2VECTORIN, F_INT2VECTOROUT},
|
||||
{"int2vector", INT2VECTOROID, 0, INDEX_MAX_KEYS * 2, F_INT2VECTORIN, F_INT2VECTOROUT},
|
||||
{"int4", INT4OID, 0, 4, F_INT4IN, F_INT4OUT},
|
||||
{"regproc", REGPROCOID, 0, 4, F_REGPROCIN, F_REGPROCOUT},
|
||||
{"text", TEXTOID, 0, -1, F_TEXTIN, F_TEXTOUT},
|
||||
|
@ -115,7 +115,7 @@ static struct typinfo Procid[] = {
|
|||
{"tid", TIDOID, 0, 6, F_TIDIN, F_TIDOUT},
|
||||
{"xid", XIDOID, 0, 4, F_XIDIN, F_XIDOUT},
|
||||
{"cid", CIDOID, 0, 4, F_CIDIN, F_CIDOUT},
|
||||
{"oidvector", 30, 0, INDEX_MAX_KEYS*4, F_OIDVECTORIN, F_OIDVECTOROUT},
|
||||
{"oidvector", 30, 0, INDEX_MAX_KEYS * 4, F_OIDVECTORIN, F_OIDVECTOROUT},
|
||||
{"smgr", 210, 0, 2, F_SMGRIN, F_SMGROUT},
|
||||
{"_int4", 1007, INT4OID, -1, F_ARRAY_IN, F_ARRAY_OUT},
|
||||
{"_aclitem", 1034, 1033, -1, F_ARRAY_IN, F_ARRAY_OUT}
|
||||
|
@ -325,8 +325,8 @@ BootstrapMain(int argc, char *argv[])
|
|||
}
|
||||
|
||||
/*
|
||||
* Bootstrap under Postmaster means two things:
|
||||
* (xloginit) ? StartupXLOG : ShutdownXLOG
|
||||
* Bootstrap under Postmaster means two things: (xloginit) ?
|
||||
* StartupXLOG : ShutdownXLOG
|
||||
*
|
||||
* If !under Postmaster and xloginit then BootStrapXLOG.
|
||||
*/
|
||||
|
@ -345,9 +345,7 @@ BootstrapMain(int argc, char *argv[])
|
|||
}
|
||||
|
||||
if (!IsUnderPostmaster && xloginit)
|
||||
{
|
||||
BootStrapXLOG();
|
||||
}
|
||||
|
||||
/*
|
||||
* backend initialization
|
||||
|
@ -478,7 +476,7 @@ boot_openrel(char *relname)
|
|||
*/
|
||||
if (namestrcmp(&attrtypes[i]->attname, "attisset") == 0)
|
||||
attrtypes[i]->attisset = get_attisset(RelationGetRelid(reldesc),
|
||||
NameStr(attrtypes[i]->attname));
|
||||
NameStr(attrtypes[i]->attname));
|
||||
else
|
||||
attrtypes[i]->attisset = false;
|
||||
|
||||
|
@ -1153,8 +1151,10 @@ build_indices()
|
|||
index_build(heap, ind, ILHead->il_natts, ILHead->il_attnos,
|
||||
ILHead->il_nparams, ILHead->il_params, ILHead->il_finfo,
|
||||
ILHead->il_predInfo);
|
||||
/* In normal processing mode, index_build would close the heap
|
||||
* and index, but in bootstrap mode it will not.
|
||||
|
||||
/*
|
||||
* In normal processing mode, index_build would close the heap and
|
||||
* index, but in bootstrap mode it will not.
|
||||
*/
|
||||
|
||||
/*
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.37 2000/01/26 05:56:09 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.38 2000/04/12 17:14:55 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* See acl.h.
|
||||
|
@ -364,7 +364,7 @@ pg_aclcheck(char *relname, char *usename, AclMode mode)
|
|||
*/
|
||||
if (((mode & ACL_WR) || (mode & ACL_AP)) &&
|
||||
!allowSystemTableMods && IsSystemRelationName(relname) &&
|
||||
strncmp(relname,"pg_temp.", strlen("pg_temp.")) != 0 &&
|
||||
strncmp(relname, "pg_temp.", strlen("pg_temp.")) != 0 &&
|
||||
!((Form_pg_shadow) GETSTRUCT(tuple))->usecatupd)
|
||||
{
|
||||
elog(DEBUG, "pg_aclcheck: catalog update to \"%s\": permission denied",
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.31 2000/04/09 04:43:15 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.32 2000/04/12 17:14:55 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -44,6 +44,7 @@ relpath(const char *relname)
|
|||
snprintf(path, bufsize, "%s%c%s", DataDir, SEP_CHAR, relname);
|
||||
return path;
|
||||
}
|
||||
|
||||
/*
|
||||
* If it is in the current database, assume it is in current working
|
||||
* directory. NB: this does not work during bootstrap!
|
||||
|
@ -55,7 +56,7 @@ relpath(const char *relname)
|
|||
* relpath_blind - construct path to a relation's file
|
||||
*
|
||||
* Construct the path using only the info available to smgrblindwrt,
|
||||
* namely the names and OIDs of the database and relation. (Shared system
|
||||
* namely the names and OIDs of the database and relation. (Shared system
|
||||
* relations are identified with dbid = 0.) Note that we may have to
|
||||
* access a relation belonging to a different database!
|
||||
*
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.124 2000/03/17 02:36:05 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.125 2000/04/12 17:14:55 momjian Exp $
|
||||
*
|
||||
*
|
||||
* INTERFACE ROUTINES
|
||||
|
@ -69,9 +69,9 @@
|
|||
|
||||
|
||||
static void AddNewRelationTuple(Relation pg_class_desc,
|
||||
Relation new_rel_desc, Oid new_rel_oid,
|
||||
int natts,
|
||||
char relkind, char *temp_relname);
|
||||
Relation new_rel_desc, Oid new_rel_oid,
|
||||
int natts,
|
||||
char relkind, char *temp_relname);
|
||||
static void AddToNoNameRelList(Relation r);
|
||||
|
||||
static void DeleteAttributeTuples(Relation rel);
|
||||
|
@ -82,7 +82,7 @@ static void RelationRemoveInheritance(Relation relation);
|
|||
static void RemoveFromNoNameRelList(Relation r);
|
||||
static void AddNewRelationType(char *typeName, Oid new_rel_oid);
|
||||
static void StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin,
|
||||
bool updatePgAttribute);
|
||||
bool updatePgAttribute);
|
||||
static void StoreRelCheck(Relation rel, char *ccname, char *ccbin);
|
||||
static void StoreConstraints(Relation rel);
|
||||
static void RemoveConstraints(Relation rel);
|
||||
|
@ -271,8 +271,9 @@ heap_create(char *relname,
|
|||
|
||||
rel = (Relation) palloc(len);
|
||||
MemSet((char *) rel, 0, len);
|
||||
rel->rd_fd = -1; /* table is not open */
|
||||
rel->rd_fd = -1; /* table is not open */
|
||||
rel->rd_unlinked = TRUE; /* table is not created yet */
|
||||
|
||||
/*
|
||||
* create a new tuple descriptor from the one passed in
|
||||
*/
|
||||
|
@ -345,7 +346,7 @@ heap_create(char *relname,
|
|||
bool
|
||||
heap_storage_create(Relation rel)
|
||||
{
|
||||
bool smgrcall = false;
|
||||
bool smgrcall = false;
|
||||
|
||||
if (rel->rd_unlinked)
|
||||
{
|
||||
|
@ -715,6 +716,7 @@ AddNewRelationTuple(Relation pg_class_desc,
|
|||
|
||||
if (!IsIgnoringSystemIndexes())
|
||||
{
|
||||
|
||||
/*
|
||||
* First, open the catalog indices and insert index tuples for the
|
||||
* new relation.
|
||||
|
@ -878,7 +880,7 @@ heap_create_with_catalog(char *relname,
|
|||
* SOMEDAY: fill the STATISTIC relation properly.
|
||||
* ----------------
|
||||
*/
|
||||
heap_close(new_rel_desc, NoLock); /* do not unlock till end of xact */
|
||||
heap_close(new_rel_desc, NoLock); /* do not unlock till end of xact */
|
||||
heap_close(pg_class_desc, RowExclusiveLock);
|
||||
|
||||
return new_rel_oid;
|
||||
|
@ -893,7 +895,7 @@ heap_create_with_catalog(char *relname,
|
|||
* 3) remove indexes
|
||||
* 4) remove pg_class tuple
|
||||
* 5) remove pg_attribute tuples and related descriptions
|
||||
* 6) remove pg_description tuples
|
||||
* 6) remove pg_description tuples
|
||||
* 7) remove pg_type tuples
|
||||
* 8) RemoveConstraints ()
|
||||
* 9) unlink relation
|
||||
|
@ -963,7 +965,7 @@ RelationRemoveInheritance(Relation relation)
|
|||
tuple = heap_getnext(scan, 0);
|
||||
if (HeapTupleIsValid(tuple))
|
||||
{
|
||||
Oid subclass = ((Form_pg_inherits) GETSTRUCT(tuple))->inhrelid;
|
||||
Oid subclass = ((Form_pg_inherits) GETSTRUCT(tuple))->inhrelid;
|
||||
|
||||
heap_endscan(scan);
|
||||
heap_close(catalogRelation, RowExclusiveLock);
|
||||
|
@ -1073,7 +1075,7 @@ DeleteRelationTuple(Relation rel)
|
|||
{
|
||||
heap_close(pg_class_desc, RowExclusiveLock);
|
||||
elog(ERROR, "Relation '%s' does not exist",
|
||||
RelationGetRelationName(rel));
|
||||
RelationGetRelationName(rel));
|
||||
}
|
||||
|
||||
/* ----------------
|
||||
|
@ -1096,19 +1098,27 @@ DeleteRelationTuple(Relation rel)
|
|||
static void
|
||||
RelationTruncateIndexes(Relation heapRelation)
|
||||
{
|
||||
Relation indexRelation, currentIndex;
|
||||
Relation indexRelation,
|
||||
currentIndex;
|
||||
ScanKeyData entry;
|
||||
HeapScanDesc scan;
|
||||
HeapTuple indexTuple, procTuple, classTuple;
|
||||
HeapTuple indexTuple,
|
||||
procTuple,
|
||||
classTuple;
|
||||
Form_pg_index index;
|
||||
Oid heapId, indexId, procId, accessMethodId;
|
||||
Node *oldPred = NULL;
|
||||
PredInfo *predInfo;
|
||||
List *cnfPred = NULL;
|
||||
Oid heapId,
|
||||
indexId,
|
||||
procId,
|
||||
accessMethodId;
|
||||
Node *oldPred = NULL;
|
||||
PredInfo *predInfo;
|
||||
List *cnfPred = NULL;
|
||||
AttrNumber *attributeNumberA;
|
||||
FuncIndexInfo fInfo, *funcInfo = NULL;
|
||||
int i, numberOfAttributes;
|
||||
char *predString;
|
||||
FuncIndexInfo fInfo,
|
||||
*funcInfo = NULL;
|
||||
int i,
|
||||
numberOfAttributes;
|
||||
char *predString;
|
||||
|
||||
heapId = RelationGetRelid(heapRelation);
|
||||
|
||||
|
@ -1120,8 +1130,10 @@ RelationTruncateIndexes(Relation heapRelation)
|
|||
scan = heap_beginscan(indexRelation, false, SnapshotNow, 1, &entry);
|
||||
while (HeapTupleIsValid(indexTuple = heap_getnext(scan, 0)))
|
||||
{
|
||||
|
||||
/*
|
||||
* For each index, fetch index attributes so we can apply index_build
|
||||
* For each index, fetch index attributes so we can apply
|
||||
* index_build
|
||||
*/
|
||||
index = (Form_pg_index) GETSTRUCT(indexTuple);
|
||||
indexId = index->indexrelid;
|
||||
|
@ -1181,8 +1193,8 @@ RelationTruncateIndexes(Relation heapRelation)
|
|||
LockRelation(currentIndex, AccessExclusiveLock);
|
||||
|
||||
/*
|
||||
* Release any buffers associated with this index. If they're dirty,
|
||||
* they're just dropped without bothering to flush to disk.
|
||||
* Release any buffers associated with this index. If they're
|
||||
* dirty, they're just dropped without bothering to flush to disk.
|
||||
*/
|
||||
ReleaseRelationBuffers(currentIndex);
|
||||
if (FlushRelationBuffers(currentIndex, (BlockNumber) 0, false) < 0)
|
||||
|
@ -1198,35 +1210,35 @@ RelationTruncateIndexes(Relation heapRelation)
|
|||
attributeNumberA, 0, NULL, funcInfo, predInfo);
|
||||
|
||||
/*
|
||||
* index_build will close both the heap and index relations
|
||||
* (but not give up the locks we hold on them). That's fine
|
||||
* for the index, but we need to open the heap again. We need
|
||||
* no new lock, since this backend still has the exclusive lock
|
||||
* grabbed by heap_truncate.
|
||||
* index_build will close both the heap and index relations (but
|
||||
* not give up the locks we hold on them). That's fine for the
|
||||
* index, but we need to open the heap again. We need no new
|
||||
* lock, since this backend still has the exclusive lock grabbed
|
||||
* by heap_truncate.
|
||||
*/
|
||||
heapRelation = heap_open(heapId, NoLock);
|
||||
Assert(heapRelation != NULL);
|
||||
}
|
||||
|
||||
/* Complete the scan and close pg_index */
|
||||
heap_endscan(scan);
|
||||
heap_endscan(scan);
|
||||
heap_close(indexRelation, AccessShareLock);
|
||||
}
|
||||
|
||||
/* ----------------------------
|
||||
* heap_truncate
|
||||
* heap_truncate
|
||||
*
|
||||
* This routine is used to truncate the data from the
|
||||
* storage manager of any data within the relation handed
|
||||
* to this routine.
|
||||
* This routine is used to truncate the data from the
|
||||
* storage manager of any data within the relation handed
|
||||
* to this routine.
|
||||
* ----------------------------
|
||||
*/
|
||||
|
||||
void
|
||||
heap_truncate(char *relname)
|
||||
{
|
||||
Relation rel;
|
||||
Oid rid;
|
||||
Relation rel;
|
||||
Oid rid;
|
||||
|
||||
/* Open relation for processing, and grab exclusive access on it. */
|
||||
|
||||
|
@ -1245,12 +1257,12 @@ heap_truncate(char *relname)
|
|||
* they don't exist anyway. So, no warning in that case.
|
||||
* ----------------
|
||||
*/
|
||||
if (IsTransactionBlock() && ! rel->rd_myxactonly)
|
||||
if (IsTransactionBlock() && !rel->rd_myxactonly)
|
||||
elog(NOTICE, "Caution: TRUNCATE TABLE cannot be rolled back, so don't abort now");
|
||||
|
||||
/*
|
||||
* Release any buffers associated with this relation. If they're dirty,
|
||||
* they're just dropped without bothering to flush to disk.
|
||||
* Release any buffers associated with this relation. If they're
|
||||
* dirty, they're just dropped without bothering to flush to disk.
|
||||
*/
|
||||
|
||||
ReleaseRelationBuffers(rel);
|
||||
|
@ -1300,17 +1312,17 @@ DeleteAttributeTuples(Relation rel)
|
|||
attnum++)
|
||||
{
|
||||
if (HeapTupleIsValid(tup = SearchSysCacheTupleCopy(ATTNUM,
|
||||
ObjectIdGetDatum(RelationGetRelid(rel)),
|
||||
Int16GetDatum(attnum),
|
||||
ObjectIdGetDatum(RelationGetRelid(rel)),
|
||||
Int16GetDatum(attnum),
|
||||
0, 0)))
|
||||
{
|
||||
|
||||
/*** Delete any comments associated with this attribute ***/
|
||||
|
||||
DeleteComments(tup->t_data->t_oid);
|
||||
/*** Delete any comments associated with this attribute ***/
|
||||
|
||||
heap_delete(pg_attribute_desc, &tup->t_self, NULL);
|
||||
heap_freetuple(tup);
|
||||
DeleteComments(tup->t_data->t_oid);
|
||||
|
||||
heap_delete(pg_attribute_desc, &tup->t_self, NULL);
|
||||
heap_freetuple(tup);
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -1429,7 +1441,7 @@ DeleteTypeTuple(Relation rel)
|
|||
* we release the read lock on pg_type. -mer 13 Aug 1991
|
||||
* ----------------
|
||||
*/
|
||||
|
||||
|
||||
heap_delete(pg_type_desc, &tup->t_self, NULL);
|
||||
|
||||
heap_endscan(pg_type_scan);
|
||||
|
@ -1477,7 +1489,7 @@ heap_drop_with_catalog(const char *relname)
|
|||
* they don't exist anyway. So, no warning in that case.
|
||||
* ----------------
|
||||
*/
|
||||
if (IsTransactionBlock() && ! rel->rd_myxactonly)
|
||||
if (IsTransactionBlock() && !rel->rd_myxactonly)
|
||||
elog(NOTICE, "Caution: DROP TABLE cannot be rolled back, so don't abort now");
|
||||
|
||||
/* ----------------
|
||||
|
@ -1547,8 +1559,8 @@ heap_drop_with_catalog(const char *relname)
|
|||
|
||||
/*
|
||||
* Close relcache entry, but *keep* AccessExclusiveLock on the
|
||||
* relation until transaction commit. This ensures no one else
|
||||
* will try to do something with the doomed relation.
|
||||
* relation until transaction commit. This ensures no one else will
|
||||
* try to do something with the doomed relation.
|
||||
*/
|
||||
heap_close(rel, NoLock);
|
||||
|
||||
|
@ -1704,7 +1716,7 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin,
|
|||
Relation idescs[Num_pg_attrdef_indices];
|
||||
HeapTuple tuple;
|
||||
Datum values[4];
|
||||
static char nulls[4] = {' ', ' ', ' ', ' '};
|
||||
static char nulls[4] = {' ', ' ', ' ', ' '};
|
||||
Relation attrrel;
|
||||
Relation attridescs[Num_pg_attr_indices];
|
||||
HeapTuple atttup;
|
||||
|
@ -1714,6 +1726,7 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin,
|
|||
* Need to construct source equivalent of given node-string.
|
||||
*/
|
||||
expr = stringToNode(adbin);
|
||||
|
||||
/*
|
||||
* deparse_expression needs a RangeTblEntry list, so make one
|
||||
*/
|
||||
|
@ -1747,18 +1760,18 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin,
|
|||
heap_freetuple(tuple);
|
||||
pfree(adsrc);
|
||||
|
||||
if (! updatePgAttribute)
|
||||
if (!updatePgAttribute)
|
||||
return; /* done if pg_attribute is OK */
|
||||
|
||||
attrrel = heap_openr(AttributeRelationName, RowExclusiveLock);
|
||||
atttup = SearchSysCacheTupleCopy(ATTNUM,
|
||||
ObjectIdGetDatum(RelationGetRelid(rel)),
|
||||
ObjectIdGetDatum(RelationGetRelid(rel)),
|
||||
(Datum) attnum, 0, 0);
|
||||
if (!HeapTupleIsValid(atttup))
|
||||
elog(ERROR, "cache lookup of attribute %d in relation %u failed",
|
||||
attnum, RelationGetRelid(rel));
|
||||
attStruct = (Form_pg_attribute) GETSTRUCT(atttup);
|
||||
if (! attStruct->atthasdef)
|
||||
if (!attStruct->atthasdef)
|
||||
{
|
||||
attStruct->atthasdef = true;
|
||||
heap_update(attrrel, &atttup->t_self, atttup, NULL);
|
||||
|
@ -1789,13 +1802,14 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
|
|||
Relation idescs[Num_pg_relcheck_indices];
|
||||
HeapTuple tuple;
|
||||
Datum values[4];
|
||||
static char nulls[4] = {' ', ' ', ' ', ' '};
|
||||
static char nulls[4] = {' ', ' ', ' ', ' '};
|
||||
|
||||
/*
|
||||
* Convert condition to a normal boolean expression tree.
|
||||
*/
|
||||
expr = stringToNode(ccbin);
|
||||
expr = (Node *) make_ands_explicit((List *) expr);
|
||||
|
||||
/*
|
||||
* deparse_expression needs a RangeTblEntry list, so make one
|
||||
*/
|
||||
|
@ -1850,9 +1864,10 @@ StoreConstraints(Relation rel)
|
|||
if (!constr)
|
||||
return;
|
||||
|
||||
/* deparsing of constraint expressions will fail unless the just-created
|
||||
* pg_attribute tuples for this relation are made visible. So, bump
|
||||
* the command counter.
|
||||
/*
|
||||
* deparsing of constraint expressions will fail unless the
|
||||
* just-created pg_attribute tuples for this relation are made
|
||||
* visible. So, bump the command counter.
|
||||
*/
|
||||
CommandCounterIncrement();
|
||||
|
||||
|
@ -1882,7 +1897,7 @@ StoreConstraints(Relation rel)
|
|||
* expression.
|
||||
*
|
||||
* NB: caller should have opened rel with AccessExclusiveLock, and should
|
||||
* hold that lock till end of transaction. Also, we assume the caller has
|
||||
* hold that lock till end of transaction. Also, we assume the caller has
|
||||
* done a CommandCounterIncrement if necessary to make the relation's catalog
|
||||
* tuples visible.
|
||||
*/
|
||||
|
@ -1921,8 +1936,8 @@ AddRelationRawConstraints(Relation rel,
|
|||
}
|
||||
|
||||
/*
|
||||
* Create a dummy ParseState and insert the target relation as
|
||||
* its sole rangetable entry. We need a ParseState for transformExpr.
|
||||
* Create a dummy ParseState and insert the target relation as its
|
||||
* sole rangetable entry. We need a ParseState for transformExpr.
|
||||
*/
|
||||
pstate = make_parsestate(NULL);
|
||||
makeRangeTable(pstate, NULL);
|
||||
|
@ -1938,25 +1953,28 @@ AddRelationRawConstraints(Relation rel,
|
|||
Oid type_id;
|
||||
|
||||
Assert(colDef->raw_default != NULL);
|
||||
|
||||
/*
|
||||
* Transform raw parsetree to executable expression.
|
||||
*/
|
||||
expr = transformExpr(pstate, colDef->raw_default, EXPR_COLUMN_FIRST);
|
||||
|
||||
/*
|
||||
* Make sure default expr does not refer to any vars.
|
||||
*/
|
||||
if (contain_var_clause(expr))
|
||||
elog(ERROR, "Cannot use attribute(s) in DEFAULT clause");
|
||||
|
||||
/*
|
||||
* Check that it will be possible to coerce the expression
|
||||
* to the column's type. We store the expression without
|
||||
* coercion, however, to avoid premature coercion in cases like
|
||||
* Check that it will be possible to coerce the expression to the
|
||||
* column's type. We store the expression without coercion,
|
||||
* however, to avoid premature coercion in cases like
|
||||
*
|
||||
* CREATE TABLE tbl (fld datetime DEFAULT 'now');
|
||||
*
|
||||
* NB: this should match the code in updateTargetListEntry()
|
||||
* that will actually do the coercion, to ensure we don't accept
|
||||
* an unusable default expression.
|
||||
* NB: this should match the code in updateTargetListEntry() that
|
||||
* will actually do the coercion, to ensure we don't accept an
|
||||
* unusable default expression.
|
||||
*/
|
||||
type_id = exprType(expr);
|
||||
if (type_id != InvalidOid)
|
||||
|
@ -1966,23 +1984,26 @@ AddRelationRawConstraints(Relation rel,
|
|||
if (type_id != atp->atttypid)
|
||||
{
|
||||
if (CoerceTargetExpr(NULL, expr, type_id,
|
||||
atp->atttypid, atp->atttypmod) == NULL)
|
||||
atp->atttypid, atp->atttypmod) == NULL)
|
||||
elog(ERROR, "Attribute '%s' is of type '%s'"
|
||||
" but default expression is of type '%s'"
|
||||
"\n\tYou will need to rewrite or cast the expression",
|
||||
"\n\tYou will need to rewrite or cast the expression",
|
||||
NameStr(atp->attname),
|
||||
typeidTypeName(atp->atttypid),
|
||||
typeidTypeName(type_id));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Might as well try to reduce any constant expressions.
|
||||
*/
|
||||
expr = eval_const_expressions(expr);
|
||||
|
||||
/*
|
||||
* Must fix opids, in case any operators remain...
|
||||
*/
|
||||
fix_opids(expr);
|
||||
|
||||
/*
|
||||
* OK, store it.
|
||||
*/
|
||||
|
@ -2037,26 +2058,31 @@ AddRelationRawConstraints(Relation rel,
|
|||
ccname = (char *) palloc(NAMEDATALEN);
|
||||
snprintf(ccname, NAMEDATALEN, "$%d", numchecks + 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Transform raw parsetree to executable expression.
|
||||
*/
|
||||
expr = transformExpr(pstate, cdef->raw_expr, EXPR_COLUMN_FIRST);
|
||||
|
||||
/*
|
||||
* Make sure it yields a boolean result.
|
||||
*/
|
||||
if (exprType(expr) != BOOLOID)
|
||||
elog(ERROR, "CHECK '%s' does not yield boolean result",
|
||||
ccname);
|
||||
|
||||
/*
|
||||
* Make sure no outside relations are referred to.
|
||||
*/
|
||||
if (length(pstate->p_rtable) != 1)
|
||||
elog(ERROR, "Only relation '%s' can be referenced in CHECK",
|
||||
relname);
|
||||
|
||||
/*
|
||||
* Might as well try to reduce any constant expressions.
|
||||
*/
|
||||
expr = eval_const_expressions(expr);
|
||||
|
||||
/*
|
||||
* Constraints are evaluated with execQual, which expects an
|
||||
* implicit-AND list, so convert expression to implicit-AND form.
|
||||
|
@ -2064,10 +2090,12 @@ AddRelationRawConstraints(Relation rel,
|
|||
* overkill...)
|
||||
*/
|
||||
expr = (Node *) make_ands_implicit((Expr *) expr);
|
||||
|
||||
/*
|
||||
* Must fix opids in operator clauses.
|
||||
*/
|
||||
fix_opids(expr);
|
||||
|
||||
/*
|
||||
* OK, store it.
|
||||
*/
|
||||
|
@ -2081,12 +2109,12 @@ AddRelationRawConstraints(Relation rel,
|
|||
* We do this even if there was no change, in order to ensure that an
|
||||
* SI update message is sent out for the pg_class tuple, which will
|
||||
* force other backends to rebuild their relcache entries for the rel.
|
||||
* (Of course, for a newly created rel there is no need for an SI message,
|
||||
* but for ALTER TABLE ADD ATTRIBUTE this'd be important.)
|
||||
* (Of course, for a newly created rel there is no need for an SI
|
||||
* message, but for ALTER TABLE ADD ATTRIBUTE this'd be important.)
|
||||
*/
|
||||
relrel = heap_openr(RelationRelationName, RowExclusiveLock);
|
||||
reltup = SearchSysCacheTupleCopy(RELOID,
|
||||
ObjectIdGetDatum(RelationGetRelid(rel)),
|
||||
ObjectIdGetDatum(RelationGetRelid(rel)),
|
||||
0, 0, 0);
|
||||
if (!HeapTupleIsValid(reltup))
|
||||
elog(ERROR, "cache lookup of relation %u failed", RelationGetRelid(rel));
|
||||
|
|
|
src/backend/catalog/index.c
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.107 2000/03/01 05:39:24 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.108 2000/04/12 17:14:55 momjian Exp $
|
||||
*
|
||||
*
|
||||
* INTERFACE ROUTINES
|
||||
|
@ -56,37 +56,41 @@
|
|||
|
||||
/* non-export function prototypes */
|
||||
static Oid GetHeapRelationOid(char *heapRelationName, char *indexRelationName,
|
||||
bool istemp);
|
||||
bool istemp);
|
||||
static TupleDesc BuildFuncTupleDesc(FuncIndexInfo *funcInfo);
|
||||
static TupleDesc ConstructTupleDescriptor(Oid heapoid, Relation heapRelation,
|
||||
List *attributeList, int numatts, AttrNumber *attNums);
|
||||
List *attributeList, int numatts, AttrNumber *attNums);
|
||||
|
||||
static void ConstructIndexReldesc(Relation indexRelation, Oid amoid);
|
||||
static Oid UpdateRelationRelation(Relation indexRelation, char *temp_relname);
|
||||
static void InitializeAttributeOids(Relation indexRelation,
|
||||
int numatts, Oid indexoid);
|
||||
int numatts, Oid indexoid);
|
||||
static void AppendAttributeTuples(Relation indexRelation, int numatts);
|
||||
static void UpdateIndexRelation(Oid indexoid, Oid heapoid,
|
||||
FuncIndexInfo *funcInfo, int natts,
|
||||
AttrNumber *attNums, Oid *classOids, Node *predicate,
|
||||
List *attributeList, bool islossy, bool unique, bool primary);
|
||||
FuncIndexInfo *funcInfo, int natts,
|
||||
AttrNumber *attNums, Oid *classOids, Node *predicate,
|
||||
List *attributeList, bool islossy, bool unique, bool primary);
|
||||
static void DefaultBuild(Relation heapRelation, Relation indexRelation,
|
||||
int numberOfAttributes, AttrNumber *attributeNumber,
|
||||
IndexStrategy indexStrategy, uint16 parameterCount,
|
||||
int numberOfAttributes, AttrNumber *attributeNumber,
|
||||
IndexStrategy indexStrategy, uint16 parameterCount,
|
||||
Datum *parameter, FuncIndexInfoPtr funcInfo, PredInfo *predInfo);
|
||||
static Oid IndexGetRelation(Oid indexId);
|
||||
static Oid IndexGetRelation(Oid indexId);
|
||||
|
||||
static bool reindexing = false;
|
||||
extern bool SetReindexProcessing(bool reindexmode)
|
||||
static bool reindexing = false;
|
||||
extern bool
|
||||
SetReindexProcessing(bool reindexmode)
|
||||
{
|
||||
bool old = reindexing;
|
||||
bool old = reindexing;
|
||||
|
||||
reindexing = reindexmode;
|
||||
return old;
|
||||
}
|
||||
extern bool IsReindexProcessing(void)
|
||||
extern bool
|
||||
IsReindexProcessing(void)
|
||||
{
|
||||
return reindexing;
|
||||
}
|
||||
|
||||
/* ----------------------------------------------------------------
|
||||
* sysatts is a structure containing attribute tuple forms
|
||||
* for system attributes (numbered -1, -2, ...). This really
|
||||
|
@ -1011,7 +1015,7 @@ index_create(char *heapRelationName,
|
|||
* ----------------
|
||||
*/
|
||||
indexRelation = heap_create(indexRelationName,
|
||||
indexTupDesc, false, istemp, false);
|
||||
indexTupDesc, false, istemp, false);
|
||||
|
||||
/* ----------------
|
||||
* construct the index relation descriptor
|
||||
|
@ -1075,9 +1079,9 @@ index_create(char *heapRelationName,
|
|||
* bootstrapping. Otherwise, we call the routine that constructs the
|
||||
* index.
|
||||
*
|
||||
* In normal processing mode, the heap and index relations are closed
|
||||
* by index_build() --- but we continue to hold the ShareLock on the
|
||||
* heap that we acquired above, until end of transaction.
|
||||
* In normal processing mode, the heap and index relations are closed by
|
||||
* index_build() --- but we continue to hold the ShareLock on the heap
|
||||
* that we acquired above, until end of transaction.
|
||||
*/
|
||||
if (IsBootstrapProcessingMode())
|
||||
{
|
||||
|
@ -1139,7 +1143,7 @@ index_drop(Oid indexId)
|
|||
* they don't exist anyway. So, no warning in that case.
|
||||
* ----------------
|
||||
*/
|
||||
if (IsTransactionBlock() && ! userIndexRelation->rd_myxactonly)
|
||||
if (IsTransactionBlock() && !userIndexRelation->rd_myxactonly)
|
||||
elog(NOTICE, "Caution: DROP INDEX cannot be rolled back, so don't abort now");
|
||||
|
||||
/* ----------------
|
||||
|
@ -1147,7 +1151,7 @@ index_drop(Oid indexId)
|
|||
* ----------------
|
||||
*/
|
||||
DeleteComments(indexId);
|
||||
|
||||
|
||||
/* ----------------
|
||||
* fix RELATION relation
|
||||
* ----------------
|
||||
|
@ -1267,15 +1271,16 @@ FormIndexDatum(int numberOfAttributes,
|
|||
* --------------------------------------------
|
||||
*/
|
||||
static
|
||||
bool LockClassinfoForUpdate(Oid relid, HeapTuple rtup, Buffer *buffer, bool confirmCommitted)
|
||||
bool
|
||||
LockClassinfoForUpdate(Oid relid, HeapTuple rtup, Buffer *buffer, bool confirmCommitted)
|
||||
{
|
||||
HeapTuple classTuple;
|
||||
Form_pg_class pgcform;
|
||||
Form_pg_class pgcform;
|
||||
bool test;
|
||||
Relation relationRelation;
|
||||
|
||||
classTuple = SearchSysCacheTuple(RELOID, PointerGetDatum(relid),
|
||||
0, 0, 0);
|
||||
0, 0, 0);
|
||||
if (!HeapTupleIsValid(classTuple))
|
||||
return false;
|
||||
rtup->t_self = classTuple->t_self;
|
||||
|
@ -1294,7 +1299,8 @@ bool LockClassinfoForUpdate(Oid relid, HeapTuple rtup, Buffer *buffer, bool conf
|
|||
RelationInvalidateHeapTuple(relationRelation, rtup);
|
||||
if (confirmCommitted)
|
||||
{
|
||||
HeapTupleHeader th = rtup->t_data;
|
||||
HeapTupleHeader th = rtup->t_data;
|
||||
|
||||
if (!(th->t_infomask & HEAP_XMIN_COMMITTED))
|
||||
elog(ERROR, "The tuple isn't committed");
|
||||
if (th->t_infomask & HEAP_XMAX_COMMITTED)
|
||||
|
@ -1309,28 +1315,29 @@ bool LockClassinfoForUpdate(Oid relid, HeapTuple rtup, Buffer *buffer, bool conf
|
|||
* Indexes of the relation active ?
|
||||
* ---------------------------------------------
|
||||
*/
|
||||
bool IndexesAreActive(Oid relid, bool confirmCommitted)
|
||||
bool
|
||||
IndexesAreActive(Oid relid, bool confirmCommitted)
|
||||
{
|
||||
HeapTupleData tuple;
|
||||
HeapTupleData tuple;
|
||||
Relation indexRelation;
|
||||
Buffer buffer;
|
||||
HeapScanDesc scan;
|
||||
ScanKeyData entry;
|
||||
HeapScanDesc scan;
|
||||
ScanKeyData entry;
|
||||
bool isactive;
|
||||
|
||||
if (!LockClassinfoForUpdate(relid, &tuple, &buffer, confirmCommitted))
|
||||
elog(ERROR, "IndexesAreActive couldn't lock %u", relid);
|
||||
if (((Form_pg_class) GETSTRUCT(&tuple))->relkind != RELKIND_RELATION)
|
||||
elog(ERROR, "relation %u isn't an relation", relid);
|
||||
elog(ERROR, "relation %u isn't an relation", relid);
|
||||
isactive = ((Form_pg_class) GETSTRUCT(&tuple))->relhasindex;
|
||||
ReleaseBuffer(buffer);
|
||||
if (isactive)
|
||||
return isactive;
|
||||
indexRelation = heap_openr(IndexRelationName, AccessShareLock);
|
||||
ScanKeyEntryInitialize(&entry, 0, Anum_pg_index_indrelid,
|
||||
F_OIDEQ, ObjectIdGetDatum(relid));
|
||||
F_OIDEQ, ObjectIdGetDatum(relid));
|
||||
scan = heap_beginscan(indexRelation, false, SnapshotNow,
|
||||
1, &entry);
|
||||
1, &entry);
|
||||
if (!heap_getnext(scan, 0))
|
||||
isactive = true;
|
||||
heap_endscan(scan);
|
||||
|
@ -1348,8 +1355,8 @@ setRelhasindexInplace(Oid relid, bool hasindex, bool immediate)
|
|||
Relation whichRel;
|
||||
Relation pg_class;
|
||||
HeapTuple tuple;
|
||||
Form_pg_class rd_rel;
|
||||
HeapScanDesc pg_class_scan = NULL;
|
||||
Form_pg_class rd_rel;
|
||||
HeapScanDesc pg_class_scan = NULL;
|
||||
|
||||
/* ----------------
|
||||
* This routine handles updates for only the heap relation
|
||||
|
@ -1384,7 +1391,7 @@ setRelhasindexInplace(Oid relid, bool hasindex, bool immediate)
|
|||
if (!IsIgnoringSystemIndexes())
|
||||
{
|
||||
tuple = SearchSysCacheTupleCopy(RELOID,
|
||||
ObjectIdGetDatum(relid), 0, 0, 0);
|
||||
ObjectIdGetDatum(relid), 0, 0, 0);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -1406,13 +1413,15 @@ setRelhasindexInplace(Oid relid, bool hasindex, bool immediate)
|
|||
heap_close(pg_class, RowExclusiveLock);
|
||||
elog(ERROR, "setRelhasindexInplace: cannot scan RELATION relation");
|
||||
}
|
||||
|
||||
/*
|
||||
* Confirm that target tuple is locked by this transaction
|
||||
* in case of immedaite updation.
|
||||
* Confirm that target tuple is locked by this transaction in case of
|
||||
* immedaite updation.
|
||||
*/
|
||||
if (immediate)
|
||||
{
|
||||
HeapTupleHeader th = tuple->t_data;
|
||||
HeapTupleHeader th = tuple->t_data;
|
||||
|
||||
if (!(th->t_infomask & HEAP_XMIN_COMMITTED))
|
||||
elog(ERROR, "Immediate hasindex updation can be done only for committed tuples %x", th->t_infomask);
|
||||
if (th->t_infomask & HEAP_XMAX_INVALID)
|
||||
|
@ -1447,7 +1456,7 @@ setRelhasindexInplace(Oid relid, bool hasindex, bool immediate)
|
|||
}
|
||||
else
|
||||
{
|
||||
HeapTupleData htup;
|
||||
HeapTupleData htup;
|
||||
Buffer buffer;
|
||||
|
||||
htup.t_self = tuple->t_self;
|
||||
|
@ -1485,7 +1494,7 @@ UpdateStats(Oid relid, long reltuples, bool inplace)
|
|||
Datum values[Natts_pg_class];
|
||||
char nulls[Natts_pg_class];
|
||||
char replace[Natts_pg_class];
|
||||
HeapScanDesc pg_class_scan = NULL;
|
||||
HeapScanDesc pg_class_scan = NULL;
|
||||
bool in_place_upd;
|
||||
|
||||
/* ----------------
|
||||
|
@ -1560,7 +1569,7 @@ UpdateStats(Oid relid, long reltuples, bool inplace)
|
|||
* pattern "CREATE TABLE; CREATE INDEX; insert data" leaves the table
|
||||
* with zero size statistics until a VACUUM is done. The optimizer will
|
||||
* generate very bad plans if the stats claim the table is empty when
|
||||
* it is actually sizable. See also CREATE TABLE in heap.c.
|
||||
* it is actually sizable. See also CREATE TABLE in heap.c.
|
||||
* ----------------
|
||||
*/
|
||||
relpages = RelationGetNumberOfBlocks(whichRel);
|
||||
|
@ -1697,10 +1706,12 @@ DefaultBuild(Relation heapRelation,
|
|||
char *nullv;
|
||||
long reltuples,
|
||||
indtuples;
|
||||
|
||||
#ifndef OMIT_PARTIAL_INDEX
|
||||
ExprContext *econtext;
|
||||
TupleTable tupleTable;
|
||||
TupleTableSlot *slot;
|
||||
|
||||
#endif
|
||||
Node *predicate;
|
||||
Node *oldPred;
|
||||
|
@ -1781,6 +1792,7 @@ DefaultBuild(Relation heapRelation,
|
|||
reltuples++;
|
||||
|
||||
#ifndef OMIT_PARTIAL_INDEX
|
||||
|
||||
/*
|
||||
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
|
||||
* this tuple if it was already in the existing partial index
|
||||
|
@ -1804,7 +1816,7 @@ DefaultBuild(Relation heapRelation,
|
|||
{
|
||||
/* SetSlotContents(slot, heapTuple); */
|
||||
slot->val = heapTuple;
|
||||
if (! ExecQual((List *) predicate, econtext, false))
|
||||
if (!ExecQual((List *) predicate, econtext, false))
|
||||
continue;
|
||||
}
|
||||
#endif /* OMIT_PARTIAL_INDEX */
|
||||
|
@ -1854,18 +1866,18 @@ DefaultBuild(Relation heapRelation,
|
|||
/*
|
||||
* Since we just counted the tuples in the heap, we update its stats
|
||||
* in pg_class to guarantee that the planner takes advantage of the
|
||||
* index we just created. But, only update statistics during
|
||||
* normal index definitions, not for indices on system catalogs
|
||||
* created during bootstrap processing. We must close the relations
|
||||
* before updating statistics to guarantee that the relcache entries
|
||||
* are flushed when we increment the command counter in UpdateStats().
|
||||
* But we do not release any locks on the relations; those will be
|
||||
* held until end of transaction.
|
||||
* index we just created. But, only update statistics during normal
|
||||
* index definitions, not for indices on system catalogs created
|
||||
* during bootstrap processing. We must close the relations before
|
||||
* updating statistics to guarantee that the relcache entries are
|
||||
* flushed when we increment the command counter in UpdateStats(). But
|
||||
* we do not release any locks on the relations; those will be held
|
||||
* until end of transaction.
|
||||
*/
|
||||
if (IsNormalProcessingMode())
|
||||
{
|
||||
Oid hrelid = RelationGetRelid(heapRelation);
|
||||
Oid irelid = RelationGetRelid(indexRelation);
|
||||
Oid hrelid = RelationGetRelid(heapRelation);
|
||||
Oid irelid = RelationGetRelid(indexRelation);
|
||||
bool inplace = IsReindexProcessing();
|
||||
|
||||
heap_close(heapRelation, NoLock);
|
||||
|
@ -1936,7 +1948,7 @@ index_build(Relation heapRelation,
|
|||
|
||||
/*
|
||||
* IndexGetRelation: given an index's relation OID, get the OID of the
|
||||
* relation it is an index on. Uses the system cache.
|
||||
* relation it is an index on. Uses the system cache.
|
||||
*/
|
||||
static Oid
|
||||
IndexGetRelation(Oid indexId)
|
||||
|
@ -2037,11 +2049,11 @@ IndexIsUniqueNoCache(Oid indexId)
|
|||
bool
|
||||
activate_index(Oid indexId, bool activate)
|
||||
{
|
||||
if (!activate) /* Currently does nothing */
|
||||
if (!activate) /* Currently does nothing */
|
||||
return true;
|
||||
return reindex_index(indexId, false);
|
||||
}
|
||||
|
||||
|
||||
/* --------------------------------
|
||||
* reindex_index - This routine is used to recreate an index
|
||||
* --------------------------------
|
||||
|
@ -2049,18 +2061,26 @@ activate_index(Oid indexId, bool activate)
|
|||
bool
|
||||
reindex_index(Oid indexId, bool force)
|
||||
{
|
||||
Relation iRel, indexRelation, heapRelation;
|
||||
ScanKeyData entry;
|
||||
HeapScanDesc scan;
|
||||
HeapTuple indexTuple, procTuple, classTuple;
|
||||
Form_pg_index index;
|
||||
Oid heapId, procId, accessMethodId;
|
||||
Node *oldPred = NULL;
|
||||
PredInfo *predInfo;
|
||||
AttrNumber *attributeNumberA;
|
||||
FuncIndexInfo fInfo, *funcInfo = NULL;
|
||||
int i, numberOfAttributes;
|
||||
char *predString;
|
||||
Relation iRel,
|
||||
indexRelation,
|
||||
heapRelation;
|
||||
ScanKeyData entry;
|
||||
HeapScanDesc scan;
|
||||
HeapTuple indexTuple,
|
||||
procTuple,
|
||||
classTuple;
|
||||
Form_pg_index index;
|
||||
Oid heapId,
|
||||
procId,
|
||||
accessMethodId;
|
||||
Node *oldPred = NULL;
|
||||
PredInfo *predInfo;
|
||||
AttrNumber *attributeNumberA;
|
||||
FuncIndexInfo fInfo,
|
||||
*funcInfo = NULL;
|
||||
int i,
|
||||
numberOfAttributes;
|
||||
char *predString;
|
||||
bool old;
|
||||
|
||||
old = SetReindexProcessing(true);
|
||||
|
@ -2135,7 +2155,7 @@ reindex_index(Oid indexId, bool force)
|
|||
LockRelation(iRel, AccessExclusiveLock);
|
||||
|
||||
/*
|
||||
* Release any buffers associated with this index. If they're dirty,
|
||||
* Release any buffers associated with this index. If they're dirty,
|
||||
* they're just dropped without bothering to flush to disk.
|
||||
*/
|
||||
ReleaseRelationBuffers(iRel);
|
||||
|
@ -2149,14 +2169,13 @@ reindex_index(Oid indexId, bool force)
|
|||
/* Initialize the index and rebuild */
|
||||
InitIndexStrategy(numberOfAttributes, iRel, accessMethodId);
|
||||
index_build(heapRelation, iRel, numberOfAttributes,
|
||||
attributeNumberA, 0, NULL, funcInfo, predInfo);
|
||||
attributeNumberA, 0, NULL, funcInfo, predInfo);
|
||||
|
||||
/*
|
||||
* index_build will close both the heap and index relations
|
||||
* (but not give up the locks we hold on them). That's fine
|
||||
* for the index, but we need to open the heap again. We need
|
||||
* no new lock, since this backend still has the exclusive lock
|
||||
* grabbed by heap_truncate.
|
||||
* index_build will close both the heap and index relations (but not
|
||||
* give up the locks we hold on them). That's fine for the index, but
|
||||
* we need to open the heap again. We need no new lock, since this
|
||||
* backend still has the exclusive lock grabbed by heap_truncate.
|
||||
*/
|
||||
iRel = index_open(indexId);
|
||||
Assert(iRel != NULL);
|
||||
|
@ -2170,7 +2189,7 @@ reindex_index(Oid indexId, bool force)
|
|||
|
||||
/*
|
||||
* ----------------------------
|
||||
* activate_indexes_of_a_table
|
||||
* activate_indexes_of_a_table
|
||||
* activate/deactivate indexes of the specified table.
|
||||
* ----------------------------
|
||||
*/
|
||||
|
@ -2182,21 +2201,18 @@ activate_indexes_of_a_table(Oid relid, bool activate)
|
|||
if (!activate)
|
||||
setRelhasindexInplace(relid, false, true);
|
||||
else
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (activate)
|
||||
reindex_relation(relid, false);
|
||||
else
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/* --------------------------------
|
||||
* reindex_relation - This routine is used to recreate indexes
|
||||
* of a relation.
|
||||
|
@ -2206,10 +2222,11 @@ bool
|
|||
reindex_relation(Oid relid, bool force)
|
||||
{
|
||||
Relation indexRelation;
|
||||
ScanKeyData entry;
|
||||
HeapScanDesc scan;
|
||||
ScanKeyData entry;
|
||||
HeapScanDesc scan;
|
||||
HeapTuple indexTuple;
|
||||
bool old, reindexed;
|
||||
bool old,
|
||||
reindexed;
|
||||
|
||||
old = SetReindexProcessing(true);
|
||||
if (IndexesAreActive(relid, true))
|
||||
|
@ -2224,13 +2241,14 @@ reindex_relation(Oid relid, bool force)
|
|||
|
||||
indexRelation = heap_openr(IndexRelationName, AccessShareLock);
|
||||
ScanKeyEntryInitialize(&entry, 0, Anum_pg_index_indrelid,
|
||||
F_OIDEQ, ObjectIdGetDatum(relid));
|
||||
F_OIDEQ, ObjectIdGetDatum(relid));
|
||||
scan = heap_beginscan(indexRelation, false, SnapshotNow,
|
||||
1, &entry);
|
||||
1, &entry);
|
||||
reindexed = false;
|
||||
while (HeapTupleIsValid(indexTuple = heap_getnext(scan, 0)))
|
||||
{
|
||||
Form_pg_index index = (Form_pg_index) GETSTRUCT(indexTuple);
|
||||
Form_pg_index index = (Form_pg_index) GETSTRUCT(indexTuple);
|
||||
|
||||
if (activate_index(index->indexrelid, true))
|
||||
reindexed = true;
|
||||
else
|
||||
|
@ -2242,9 +2260,7 @@ reindex_relation(Oid relid, bool force)
|
|||
heap_endscan(scan);
|
||||
heap_close(indexRelation, AccessShareLock);
|
||||
if (reindexed)
|
||||
{
|
||||
setRelhasindexInplace(relid, true, false);
|
||||
}
|
||||
SetReindexProcessing(old);
|
||||
return reindexed;
|
||||
}
|
||||
|
|
|
src/backend/catalog/indexing.c
@ -9,7 +9,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.59 2000/02/18 09:28:41 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.60 2000/04/12 17:14:56 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -31,54 +31,54 @@
|
|||
*/
|
||||
|
||||
char *Name_pg_aggregate_indices[Num_pg_aggregate_indices] =
|
||||
{AggregateNameTypeIndex};
|
||||
{AggregateNameTypeIndex};
|
||||
char *Name_pg_am_indices[Num_pg_am_indices] =
|
||||
{AmNameIndex};
|
||||
{AmNameIndex};
|
||||
char *Name_pg_amop_indices[Num_pg_amop_indices] =
|
||||
{AccessMethodOpidIndex, AccessMethodStrategyIndex};
|
||||
{AccessMethodOpidIndex, AccessMethodStrategyIndex};
|
||||
char *Name_pg_attr_indices[Num_pg_attr_indices] =
|
||||
{AttributeRelidNameIndex, AttributeRelidNumIndex};
|
||||
{AttributeRelidNameIndex, AttributeRelidNumIndex};
|
||||
char *Name_pg_attrdef_indices[Num_pg_attrdef_indices] =
|
||||
{AttrDefaultIndex};
|
||||
{AttrDefaultIndex};
|
||||
char *Name_pg_class_indices[Num_pg_class_indices] =
|
||||
{ClassNameIndex, ClassOidIndex};
|
||||
{ClassNameIndex, ClassOidIndex};
|
||||
char *Name_pg_group_indices[Num_pg_group_indices] =
|
||||
{GroupNameIndex, GroupSysidIndex};
|
||||
{GroupNameIndex, GroupSysidIndex};
|
||||
char *Name_pg_index_indices[Num_pg_index_indices] =
|
||||
{IndexRelidIndex};
|
||||
{IndexRelidIndex};
|
||||
char *Name_pg_inherits_indices[Num_pg_inherits_indices] =
|
||||
{InheritsRelidSeqnoIndex};
|
||||
{InheritsRelidSeqnoIndex};
|
||||
char *Name_pg_language_indices[Num_pg_language_indices] =
|
||||
{LanguageOidIndex, LanguageNameIndex};
|
||||
{LanguageOidIndex, LanguageNameIndex};
|
||||
char *Name_pg_listener_indices[Num_pg_listener_indices] =
|
||||
{ListenerRelnamePidIndex};
|
||||
{ListenerRelnamePidIndex};
|
||||
char *Name_pg_opclass_indices[Num_pg_opclass_indices] =
|
||||
{OpclassNameIndex, OpclassDeftypeIndex};
|
||||
{OpclassNameIndex, OpclassDeftypeIndex};
|
||||
char *Name_pg_operator_indices[Num_pg_operator_indices] =
|
||||
{OperatorOidIndex, OperatorNameIndex};
|
||||
{OperatorOidIndex, OperatorNameIndex};
|
||||
char *Name_pg_proc_indices[Num_pg_proc_indices] =
|
||||
{ProcedureOidIndex, ProcedureNameIndex};
|
||||
{ProcedureOidIndex, ProcedureNameIndex};
|
||||
char *Name_pg_relcheck_indices[Num_pg_relcheck_indices] =
|
||||
{RelCheckIndex};
|
||||
{RelCheckIndex};
|
||||
char *Name_pg_rewrite_indices[Num_pg_rewrite_indices] =
|
||||
{RewriteOidIndex, RewriteRulenameIndex};
|
||||
{RewriteOidIndex, RewriteRulenameIndex};
|
||||
char *Name_pg_shadow_indices[Num_pg_shadow_indices] =
|
||||
{ShadowNameIndex, ShadowSysidIndex};
|
||||
{ShadowNameIndex, ShadowSysidIndex};
|
||||
char *Name_pg_statistic_indices[Num_pg_statistic_indices] =
|
||||
{StatisticRelidAttnumIndex};
|
||||
{StatisticRelidAttnumIndex};
|
||||
char *Name_pg_trigger_indices[Num_pg_trigger_indices] =
|
||||
{TriggerRelidIndex, TriggerConstrNameIndex, TriggerConstrRelidIndex};
|
||||
{TriggerRelidIndex, TriggerConstrNameIndex, TriggerConstrRelidIndex};
|
||||
char *Name_pg_type_indices[Num_pg_type_indices] =
|
||||
{TypeNameIndex, TypeOidIndex};
|
||||
char *Name_pg_description_indices[Num_pg_description_indices] =
|
||||
{DescriptionObjIndex};
|
||||
{TypeNameIndex, TypeOidIndex};
|
||||
char *Name_pg_description_indices[Num_pg_description_indices] =
|
||||
{DescriptionObjIndex};
|
||||
|
||||
|
||||
|
||||
static HeapTuple CatalogIndexFetchTuple(Relation heapRelation,
|
||||
Relation idesc,
|
||||
ScanKey skey,
|
||||
int16 num_keys);
|
||||
Relation idesc,
|
||||
ScanKey skey,
|
||||
int16 num_keys);
|
||||
|
||||
|
||||
/*
|
||||
|
@ -279,7 +279,7 @@ CatalogIndexFetchTuple(Relation heapRelation,
|
|||
|
||||
|
||||
/*---------------------------------------------------------------------
|
||||
* Class-specific index lookups
|
||||
* Class-specific index lookups
|
||||
*---------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
|
@ -297,7 +297,7 @@ AggregateNameTypeIndexScan(Relation heapRelation, char *aggName, Oid aggType)
|
|||
Relation idesc;
|
||||
ScanKeyData skey[2];
|
||||
HeapTuple tuple;
|
||||
|
||||
|
||||
ScanKeyEntryInitialize(&skey[0],
|
||||
(bits16) 0x0,
|
||||
(AttrNumber) 1,
|
||||
|
@ -324,7 +324,7 @@ AmNameIndexScan(Relation heapRelation, char *amName)
|
|||
Relation idesc;
|
||||
ScanKeyData skey[1];
|
||||
HeapTuple tuple;
|
||||
|
||||
|
||||
ScanKeyEntryInitialize(&skey[0],
|
||||
(bits16) 0x0,
|
||||
(AttrNumber) 1,
|
||||
|
@ -414,8 +414,8 @@ AccessMethodStrategyIndexScan(Relation heapRelation,
|
|||
|
||||
HeapTuple
|
||||
AttributeRelidNameIndexScan(Relation heapRelation,
|
||||
Oid relid,
|
||||
char *attname)
|
||||
Oid relid,
|
||||
char *attname)
|
||||
{
|
||||
Relation idesc;
|
||||
ScanKeyData skey[2];
|
||||
|
@ -444,8 +444,8 @@ AttributeRelidNameIndexScan(Relation heapRelation,
|
|||
|
||||
HeapTuple
|
||||
AttributeRelidNumIndexScan(Relation heapRelation,
|
||||
Oid relid,
|
||||
AttrNumber attnum)
|
||||
Oid relid,
|
||||
AttrNumber attnum)
|
||||
{
|
||||
Relation idesc;
|
||||
ScanKeyData skey[2];
|
||||
|
@ -500,7 +500,7 @@ OpclassNameIndexScan(Relation heapRelation, char *opcName)
|
|||
Relation idesc;
|
||||
ScanKeyData skey[1];
|
||||
HeapTuple tuple;
|
||||
|
||||
|
||||
ScanKeyEntryInitialize(&skey[0],
|
||||
(bits16) 0x0,
|
||||
(AttrNumber) 1,
|
||||
|
@ -521,7 +521,7 @@ GroupNameIndexScan(Relation heapRelation, char *groName)
|
|||
Relation idesc;
|
||||
ScanKeyData skey[1];
|
||||
HeapTuple tuple;
|
||||
|
||||
|
||||
ScanKeyEntryInitialize(&skey[0],
|
||||
(bits16) 0x0,
|
||||
(AttrNumber) 1,
|
||||
|
@ -542,7 +542,7 @@ GroupSysidIndexScan(Relation heapRelation, int4 sysId)
|
|||
Relation idesc;
|
||||
ScanKeyData skey[1];
|
||||
HeapTuple tuple;
|
||||
|
||||
|
||||
ScanKeyEntryInitialize(&skey[0],
|
||||
(bits16) 0x0,
|
||||
(AttrNumber) 1,
|
||||
|
@ -581,8 +581,8 @@ IndexRelidIndexScan(Relation heapRelation, Oid relid)
|
|||
|
||||
HeapTuple
|
||||
InheritsRelidSeqnoIndexScan(Relation heapRelation,
|
||||
Oid relid,
|
||||
int4 seqno)
|
||||
Oid relid,
|
||||
int4 seqno)
|
||||
{
|
||||
Relation idesc;
|
||||
ScanKeyData skey[2];
|
||||
|
@ -615,7 +615,7 @@ LanguageNameIndexScan(Relation heapRelation, char *lanName)
|
|||
Relation idesc;
|
||||
ScanKeyData skey[1];
|
||||
HeapTuple tuple;
|
||||
|
||||
|
||||
ScanKeyEntryInitialize(&skey[0],
|
||||
(bits16) 0x0,
|
||||
(AttrNumber) 1,
|
||||
|
@ -658,7 +658,7 @@ ListenerRelnamePidIndexScan(Relation heapRelation, char *relName, int4 pid)
|
|||
Relation idesc;
|
||||
ScanKeyData skey[2];
|
||||
HeapTuple tuple;
|
||||
|
||||
|
||||
ScanKeyEntryInitialize(&skey[0],
|
||||
(bits16) 0x0,
|
||||
(AttrNumber) 1,
|
||||
|
@ -681,10 +681,10 @@ ListenerRelnamePidIndexScan(Relation heapRelation, char *relName, int4 pid)
|
|||
|
||||
HeapTuple
|
||||
OperatorNameIndexScan(Relation heapRelation,
|
||||
char *oprName,
|
||||
Oid oprLeft,
|
||||
Oid oprRight,
|
||||
char oprKind)
|
||||
char *oprName,
|
||||
Oid oprLeft,
|
||||
Oid oprRight,
|
||||
char oprKind)
|
||||
{
|
||||
Relation idesc;
|
||||
ScanKeyData skey[4];
|
||||
|
@ -810,7 +810,7 @@ ClassNameIndexScan(Relation heapRelation, char *relName)
|
|||
Relation idesc;
|
||||
ScanKeyData skey[1];
|
||||
HeapTuple tuple;
|
||||
|
||||
|
||||
ScanKeyEntryInitialize(&skey[0],
|
||||
(bits16) 0x0,
|
||||
(AttrNumber) 1,
|
||||
|
@ -853,7 +853,7 @@ RewriteRulenameIndexScan(Relation heapRelation, char *ruleName)
|
|||
Relation idesc;
|
||||
ScanKeyData skey[1];
|
||||
HeapTuple tuple;
|
||||
|
||||
|
||||
ScanKeyEntryInitialize(&skey[0],
|
||||
(bits16) 0x0,
|
||||
(AttrNumber) 1,
|
||||
|
@ -896,7 +896,7 @@ ShadowNameIndexScan(Relation heapRelation, char *useName)
|
|||
Relation idesc;
|
||||
ScanKeyData skey[1];
|
||||
HeapTuple tuple;
|
||||
|
||||
|
||||
ScanKeyEntryInitialize(&skey[0],
|
||||
(bits16) 0x0,
|
||||
(AttrNumber) 1,
|
||||
|
@ -917,7 +917,7 @@ ShadowSysidIndexScan(Relation heapRelation, int4 sysId)
|
|||
Relation idesc;
|
||||
ScanKeyData skey[1];
|
||||
HeapTuple tuple;
|
||||
|
||||
|
||||
ScanKeyEntryInitialize(&skey[0],
|
||||
(bits16) 0x0,
|
||||
(AttrNumber) 1,
|
||||
|
@ -934,8 +934,8 @@ ShadowSysidIndexScan(Relation heapRelation, int4 sysId)
|
|||
|
||||
HeapTuple
|
||||
StatisticRelidAttnumIndexScan(Relation heapRelation,
|
||||
Oid relId,
|
||||
AttrNumber attNum)
|
||||
Oid relId,
|
||||
AttrNumber attNum)
|
||||
{
|
||||
Relation idesc;
|
||||
ScanKeyData skey[2];
|
||||
|
@ -1004,4 +1004,3 @@ TypeOidIndexScan(Relation heapRelation, Oid typeId)
|
|||
|
||||
return tuple;
|
||||
}
|
||||
|
||||
|
|
|
src/backend/catalog/pg_aggregate.c
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.30 2000/03/26 19:43:58 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.31 2000/04/12 17:14:56 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -168,7 +168,7 @@ AggregateCreate(char *aggName,
|
|||
/* handle finalfn */
|
||||
if (aggfinalfnName)
|
||||
{
|
||||
int nargs = 0;
|
||||
int nargs = 0;
|
||||
|
||||
if (OidIsValid(xret1))
|
||||
fnArgs[nargs++] = xret1;
|
||||
|
@ -184,7 +184,7 @@ AggregateCreate(char *aggName,
|
|||
{
|
||||
if (nargs == 2)
|
||||
elog(ERROR, "AggregateCreate: '%s'('%s','%s') does not exist",
|
||||
aggfinalfnName, aggtransfn1typeName, aggtransfn2typeName);
|
||||
aggfinalfnName, aggtransfn1typeName, aggtransfn2typeName);
|
||||
else if (OidIsValid(xret1))
|
||||
elog(ERROR, "AggregateCreate: '%s'('%s') does not exist",
|
||||
aggfinalfnName, aggtransfn1typeName);
|
||||
|
@ -200,8 +200,10 @@ AggregateCreate(char *aggName,
|
|||
}
|
||||
else
|
||||
{
|
||||
/* If no finalfn, aggregate result type is type of the sole
|
||||
* state value (we already checked there is only one)
|
||||
|
||||
/*
|
||||
* If no finalfn, aggregate result type is type of the sole state
|
||||
* value (we already checked there is only one)
|
||||
*/
|
||||
if (OidIsValid(xret1))
|
||||
fret = xret1;
|
||||
|
@ -284,9 +286,9 @@ AggNameGetInitVal(char *aggName, Oid basetype, int xfuncno, bool *isNull)
|
|||
Assert(xfuncno == 1 || xfuncno == 2);
|
||||
|
||||
/*
|
||||
* since we will have to use fastgetattr (in case one or both init vals
|
||||
* are NULL), we will need to open the relation. Do that first to
|
||||
* ensure we don't get a stale tuple from the cache.
|
||||
* since we will have to use fastgetattr (in case one or both init
|
||||
* vals are NULL), we will need to open the relation. Do that first
|
||||
* to ensure we don't get a stale tuple from the cache.
|
||||
*/
|
||||
|
||||
aggRel = heap_openr(AggregateRelationName, AccessShareLock);
|
||||
|
|
|
src/backend/catalog/pg_proc.c
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.41 2000/04/04 21:44:37 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.42 2000/04/12 17:14:56 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -154,7 +154,7 @@ ProcedureCreate(char *procedureName,
|
|||
return tup->t_data->t_oid;
|
||||
#else
|
||||
elog(ERROR, "lookup for procedure by source needs fix (Jan)");
|
||||
#endif /* SETS_FIXED */
|
||||
#endif /* SETS_FIXED */
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -245,7 +245,7 @@ ProcedureCreate(char *procedureName,
|
|||
prosrc = procedureName;
|
||||
if (fmgr_lookupByName(prosrc) == (func_ptr) NULL)
|
||||
elog(ERROR,
|
||||
"ProcedureCreate: there is no builtin function named \"%s\"",
|
||||
"ProcedureCreate: there is no builtin function named \"%s\"",
|
||||
prosrc);
|
||||
}
|
||||
|
||||
|
|
|
src/backend/catalog/pg_type.c
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.49 2000/01/26 05:56:11 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.50 2000/04/12 17:14:56 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -374,7 +374,7 @@ TypeCreate(char *typeName,
|
|||
values[i++] = (Datum) GetUserId(); /* 2 */
|
||||
values[i++] = (Datum) internalSize; /* 3 */
|
||||
values[i++] = (Datum) externalSize; /* 4 */
|
||||
values[i++] = (Datum) passedByValue;/* 5 */
|
||||
values[i++] = (Datum) passedByValue; /* 5 */
|
||||
values[i++] = (Datum) typeType; /* 6 */
|
||||
values[i++] = (Datum) (bool) 1; /* 7 */
|
||||
values[i++] = (Datum) typDelim; /* 8 */
|
||||
|
|
|
src/backend/commands/_deadcode/recipe.c
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.10 2000/01/26 05:56:17 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.11 2000/04/12 17:15:06 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -702,7 +702,7 @@ getParamTypes(TgElement * elem, Oid *typev)
|
|||
if (parameterCount == FUNC_MAX_ARGS)
|
||||
{
|
||||
elog(ERROR,
|
||||
"getParamTypes: Ingredients cannot take > %d arguments",FUNC_MAX_ARGS);
|
||||
"getParamTypes: Ingredients cannot take > %d arguments", FUNC_MAX_ARGS);
|
||||
}
|
||||
t = elem->inTypes->val[j];
|
||||
if (strcmp(t, "opaque") == 0)
|
||||
|
@ -810,7 +810,7 @@ tg_parseSubQuery(TgRecipe * r, TgNode * n, TeeInfo * teeInfo)
|
|||
{
|
||||
TgElement *elem;
|
||||
char *funcName;
|
||||
Oid typev[FUNC_MAX_ARGS], /* eight arguments maximum */
|
||||
Oid typev[FUNC_MAX_ARGS], /* eight arguments maximum */
|
||||
relid;
|
||||
int i,
|
||||
parameterCount;
|
||||
|
|
|
src/backend/commands/async.c
@ -7,7 +7,7 @@
|
|||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.58 2000/01/26 05:56:12 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.59 2000/04/12 17:14:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -155,12 +155,13 @@ Async_Notify(char *relname)
|
|||
/* no point in making duplicate entries in the list ... */
|
||||
if (!AsyncExistsPendingNotify(relname))
|
||||
{
|
||||
|
||||
/*
|
||||
* We allocate list memory from the global malloc pool to ensure
|
||||
* that it will live until we want to use it. This is probably not
|
||||
* necessary any longer, since we will use it before the end of the
|
||||
* transaction. DLList only knows how to use malloc() anyway, but we
|
||||
* could probably palloc() the strings...
|
||||
* that it will live until we want to use it. This is probably
|
||||
* not necessary any longer, since we will use it before the end
|
||||
* of the transaction. DLList only knows how to use malloc()
|
||||
* anyway, but we could probably palloc() the strings...
|
||||
*/
|
||||
notifyName = strdup(relname);
|
||||
DLAddHead(pendingNotifies, DLNewElem(notifyName));
|
||||
|
@ -466,6 +467,7 @@ AtCommit_Notify()
|
|||
|
||||
if (listenerPID == MyProcPid)
|
||||
{
|
||||
|
||||
/*
|
||||
* Self-notify: no need to bother with table update.
|
||||
* Indeed, we *must not* clear the notification field in
|
||||
|
@ -491,6 +493,7 @@ AtCommit_Notify()
|
|||
*/
|
||||
if (kill(listenerPID, SIGUSR2) < 0)
|
||||
{
|
||||
|
||||
/*
|
||||
* Get rid of pg_listener entry if it refers to a PID
|
||||
* that no longer exists. Presumably, that backend
|
||||
|
@ -514,7 +517,7 @@ AtCommit_Notify()
|
|||
if (RelationGetForm(lRel)->relhasindex)
|
||||
{
|
||||
Relation idescs[Num_pg_listener_indices];
|
||||
|
||||
|
||||
CatalogOpenIndices(Num_pg_listener_indices, Name_pg_listener_indices, idescs);
|
||||
CatalogIndexInsert(idescs, Num_pg_listener_indices, lRel, rTuple);
|
||||
CatalogCloseIndices(Num_pg_listener_indices, idescs);
|
||||
|
@ -780,7 +783,7 @@ ProcessIncomingNotify(void)
|
|||
if (RelationGetForm(lRel)->relhasindex)
|
||||
{
|
||||
Relation idescs[Num_pg_listener_indices];
|
||||
|
||||
|
||||
CatalogOpenIndices(Num_pg_listener_indices, Name_pg_listener_indices, idescs);
|
||||
CatalogIndexInsert(idescs, Num_pg_listener_indices, lRel, rTuple);
|
||||
CatalogCloseIndices(Num_pg_listener_indices, idescs);
|
||||
|
|
|
src/backend/commands/cluster.c
@ -15,7 +15,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.50 2000/01/26 05:56:13 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.51 2000/04/12 17:14:57 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -104,8 +104,8 @@ cluster(char *oldrelname, char *oldindexname)
|
|||
* Like vacuum, cluster spans transactions, so I'm going to handle it
|
||||
* in the same way: commit and restart transactions where needed.
|
||||
*
|
||||
* We grab exclusive access to the target rel and index for the
|
||||
* duration of the initial transaction.
|
||||
* We grab exclusive access to the target rel and index for the duration
|
||||
* of the initial transaction.
|
||||
*/
|
||||
|
||||
OldHeap = heap_openr(oldrelname, AccessExclusiveLock);
|
||||
|
@ -115,7 +115,7 @@ cluster(char *oldrelname, char *oldindexname)
|
|||
LockRelation(OldIndex, AccessExclusiveLock);
|
||||
OIDOldIndex = RelationGetRelid(OldIndex);
|
||||
|
||||
heap_close(OldHeap, NoLock); /* do NOT give up the locks */
|
||||
heap_close(OldHeap, NoLock);/* do NOT give up the locks */
|
||||
index_close(OldIndex);
|
||||
|
||||
/*
|
||||
|
|
|
src/backend/commands/command.c
@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.70 2000/03/09 05:00:23 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.71 2000/04/12 17:14:57 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* The PortalExecutorHeapMemory crap needs to be eliminated
|
||||
|
@ -51,7 +51,7 @@
|
|||
#include "access/genam.h"
|
||||
#include "optimizer/clauses.h"
|
||||
#include "../parser/parse.h"
|
||||
#endif /* _DROP_COLUMN_HACK__ */
|
||||
#endif /* _DROP_COLUMN_HACK__ */
|
||||
|
||||
/* ----------------
|
||||
* PortalExecutorHeapMemory stuff
|
||||
|
@ -262,7 +262,7 @@ PerformPortalClose(char *name, CommandDest dest)
|
|||
}
|
||||
|
||||
/* ----------------
|
||||
* AlterTableAddColumn
|
||||
* AlterTableAddColumn
|
||||
* (formerly known as PerformAddAttribute)
|
||||
*
|
||||
* adds an additional attribute to a relation
|
||||
|
@ -327,8 +327,8 @@ AlterTableAddColumn(const char *relationName,
|
|||
#endif
|
||||
|
||||
/*
|
||||
* Grab an exclusive lock on the target table, which we will NOT release
|
||||
* until end of transaction.
|
||||
* Grab an exclusive lock on the target table, which we will NOT
|
||||
* release until end of transaction.
|
||||
*/
|
||||
rel = heap_openr(relationName, AccessExclusiveLock);
|
||||
myrelid = RelationGetRelid(rel);
|
||||
|
@ -341,7 +341,7 @@ AlterTableAddColumn(const char *relationName,
|
|||
elog(ERROR, "Can't add a NOT NULL attribute to an existing relation");
|
||||
|
||||
if (colDef->raw_default || colDef->cooked_default)
|
||||
elog(ERROR, "Adding columns with defaults is not implemented.");
|
||||
elog(ERROR, "Adding columns with defaults is not implemented.");
|
||||
|
||||
|
||||
/*
|
||||
|
@ -370,7 +370,7 @@ AlterTableAddColumn(const char *relationName,
|
|||
*/
|
||||
foreach(child, children)
|
||||
{
|
||||
Oid childrelid = lfirsti(child);
|
||||
Oid childrelid = lfirsti(child);
|
||||
|
||||
if (childrelid == myrelid)
|
||||
continue;
|
||||
|
@ -514,13 +514,13 @@ static void drop_default(Oid relid, int16 attnum);
|
|||
*/
|
||||
void
|
||||
AlterTableAlterColumn(const char *relationName,
|
||||
bool inh, const char *colName,
|
||||
Node *newDefault)
|
||||
bool inh, const char *colName,
|
||||
Node *newDefault)
|
||||
{
|
||||
Relation rel;
|
||||
HeapTuple tuple;
|
||||
int16 attnum;
|
||||
Oid myrelid;
|
||||
Relation rel;
|
||||
HeapTuple tuple;
|
||||
int16 attnum;
|
||||
Oid myrelid;
|
||||
|
||||
if (!allowSystemTableMods && IsSystemRelationName(relationName))
|
||||
elog(ERROR, "ALTER TABLE: relation \"%s\" is a system catalog",
|
||||
|
@ -530,121 +530,122 @@ AlterTableAlterColumn(const char *relationName,
|
|||
elog(ERROR, "ALTER TABLE: permission denied");
|
||||
#endif
|
||||
|
||||
rel = heap_openr(relationName, AccessExclusiveLock);
|
||||
myrelid = RelationGetRelid(rel);
|
||||
heap_close(rel, NoLock);
|
||||
rel = heap_openr(relationName, AccessExclusiveLock);
|
||||
myrelid = RelationGetRelid(rel);
|
||||
heap_close(rel, NoLock);
|
||||
|
||||
/*
|
||||
* Propagate to children if desired
|
||||
*/
|
||||
/*
|
||||
* Propagate to children if desired
|
||||
*/
|
||||
if (inh)
|
||||
{
|
||||
List *child,
|
||||
*children;
|
||||
{
|
||||
List *child,
|
||||
*children;
|
||||
|
||||
/* this routine is actually in the planner */
|
||||
children = find_all_inheritors(myrelid);
|
||||
/* this routine is actually in the planner */
|
||||
children = find_all_inheritors(myrelid);
|
||||
|
||||
/*
|
||||
* find_all_inheritors does the recursive search of the
|
||||
* inheritance hierarchy, so all we have to do is process all
|
||||
* of the relids in the list that it returns.
|
||||
*/
|
||||
foreach(child, children)
|
||||
/*
|
||||
* find_all_inheritors does the recursive search of the
|
||||
* inheritance hierarchy, so all we have to do is process all of
|
||||
* the relids in the list that it returns.
|
||||
*/
|
||||
foreach(child, children)
|
||||
{
|
||||
Oid childrelid = lfirsti(child);
|
||||
Oid childrelid = lfirsti(child);
|
||||
|
||||
if (childrelid == myrelid)
|
||||
continue;
|
||||
rel = heap_open(childrelid, AccessExclusiveLock);
|
||||
AlterTableAlterColumn(RelationGetRelationName(rel),
|
||||
false, colName, newDefault);
|
||||
heap_close(rel, AccessExclusiveLock);
|
||||
}
|
||||
}
|
||||
if (childrelid == myrelid)
|
||||
continue;
|
||||
rel = heap_open(childrelid, AccessExclusiveLock);
|
||||
AlterTableAlterColumn(RelationGetRelationName(rel),
|
||||
false, colName, newDefault);
|
||||
heap_close(rel, AccessExclusiveLock);
|
||||
}
|
||||
}
|
||||
|
||||
/* -= now do the thing on this relation =- */
|
||||
/* -= now do the thing on this relation =- */
|
||||
|
||||
/* reopen the business */
|
||||
rel = heap_openr((char *)relationName, AccessExclusiveLock);
|
||||
/* reopen the business */
|
||||
rel = heap_openr((char *) relationName, AccessExclusiveLock);
|
||||
|
||||
/*
|
||||
* get the number of the attribute
|
||||
*/
|
||||
tuple = SearchSysCacheTuple(ATTNAME,
|
||||
ObjectIdGetDatum(myrelid),
|
||||
NameGetDatum(namein((char *)colName)),
|
||||
0, 0);
|
||||
/*
|
||||
* get the number of the attribute
|
||||
*/
|
||||
tuple = SearchSysCacheTuple(ATTNAME,
|
||||
ObjectIdGetDatum(myrelid),
|
||||
NameGetDatum(namein((char *) colName)),
|
||||
0, 0);
|
||||
|
||||
if (!HeapTupleIsValid(tuple))
|
||||
{
|
||||
heap_close(rel, AccessExclusiveLock);
|
||||
elog(ERROR, "ALTER TABLE: relation \"%s\" has no column \"%s\"",
|
||||
relationName, colName);
|
||||
}
|
||||
if (!HeapTupleIsValid(tuple))
|
||||
{
|
||||
heap_close(rel, AccessExclusiveLock);
|
||||
elog(ERROR, "ALTER TABLE: relation \"%s\" has no column \"%s\"",
|
||||
relationName, colName);
|
||||
}
|
||||
|
||||
attnum = ((Form_pg_attribute) GETSTRUCT(tuple))->attnum;
|
||||
attnum = ((Form_pg_attribute) GETSTRUCT(tuple))->attnum;
|
||||
|
||||
if (newDefault) /* SET DEFAULT */
|
||||
{
|
||||
List* rawDefaults = NIL;
|
||||
RawColumnDefault *rawEnt;
|
||||
if (newDefault) /* SET DEFAULT */
|
||||
{
|
||||
List *rawDefaults = NIL;
|
||||
RawColumnDefault *rawEnt;
|
||||
|
||||
/* Get rid of the old one first */
|
||||
drop_default(myrelid, attnum);
|
||||
/* Get rid of the old one first */
|
||||
drop_default(myrelid, attnum);
|
||||
|
||||
rawEnt = (RawColumnDefault *) palloc(sizeof(RawColumnDefault));
|
||||
rawEnt->attnum = attnum;
|
||||
rawEnt->raw_default = newDefault;
|
||||
rawEnt->raw_default = newDefault;
|
||||
rawDefaults = lappend(rawDefaults, rawEnt);
|
||||
|
||||
/*
|
||||
* This function is intended for CREATE TABLE,
|
||||
* so it processes a _list_ of defaults, but we just do one.
|
||||
*/
|
||||
AddRelationRawConstraints(rel, rawDefaults, NIL);
|
||||
}
|
||||
/*
|
||||
* This function is intended for CREATE TABLE, so it processes a
|
||||
* _list_ of defaults, but we just do one.
|
||||
*/
|
||||
AddRelationRawConstraints(rel, rawDefaults, NIL);
|
||||
}
|
||||
|
||||
else /* DROP DEFAULT */
|
||||
{
|
||||
Relation attr_rel;
|
||||
ScanKeyData scankeys[3];
|
||||
HeapScanDesc scan;
|
||||
HeapTuple tuple;
|
||||
else
|
||||
/* DROP DEFAULT */
|
||||
{
|
||||
Relation attr_rel;
|
||||
ScanKeyData scankeys[3];
|
||||
HeapScanDesc scan;
|
||||
HeapTuple tuple;
|
||||
|
||||
attr_rel = heap_openr(AttributeRelationName, AccessExclusiveLock);
|
||||
ScanKeyEntryInitialize(&scankeys[0], 0x0, Anum_pg_attribute_attrelid, F_OIDEQ,
|
||||
ObjectIdGetDatum(myrelid));
|
||||
ScanKeyEntryInitialize(&scankeys[1], 0x0, Anum_pg_attribute_attnum, F_INT2EQ,
|
||||
Int16GetDatum(attnum));
|
||||
ScanKeyEntryInitialize(&scankeys[2], 0x0, Anum_pg_attribute_atthasdef, F_BOOLEQ,
|
||||
TRUE);
|
||||
attr_rel = heap_openr(AttributeRelationName, AccessExclusiveLock);
|
||||
ScanKeyEntryInitialize(&scankeys[0], 0x0, Anum_pg_attribute_attrelid, F_OIDEQ,
|
||||
ObjectIdGetDatum(myrelid));
|
||||
ScanKeyEntryInitialize(&scankeys[1], 0x0, Anum_pg_attribute_attnum, F_INT2EQ,
|
||||
Int16GetDatum(attnum));
|
||||
ScanKeyEntryInitialize(&scankeys[2], 0x0, Anum_pg_attribute_atthasdef, F_BOOLEQ,
|
||||
TRUE);
|
||||
|
||||
scan = heap_beginscan(attr_rel, false, SnapshotNow, 3, scankeys);
|
||||
AssertState(scan!=NULL);
|
||||
scan = heap_beginscan(attr_rel, false, SnapshotNow, 3, scankeys);
|
||||
AssertState(scan != NULL);
|
||||
|
||||
if (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
|
||||
{
|
||||
HeapTuple newtuple;
|
||||
Relation irelations[Num_pg_attr_indices];
|
||||
if (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
|
||||
{
|
||||
HeapTuple newtuple;
|
||||
Relation irelations[Num_pg_attr_indices];
|
||||
|
||||
/* update to false */
|
||||
newtuple = heap_copytuple(tuple);
|
||||
((Form_pg_attribute) GETSTRUCT(newtuple))->atthasdef = FALSE;
|
||||
heap_update(attr_rel, &tuple->t_self, newtuple, NULL);
|
||||
/* update to false */
|
||||
newtuple = heap_copytuple(tuple);
|
||||
((Form_pg_attribute) GETSTRUCT(newtuple))->atthasdef = FALSE;
|
||||
heap_update(attr_rel, &tuple->t_self, newtuple, NULL);
|
||||
|
||||
/* keep the system catalog indices current */
|
||||
CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, irelations);
|
||||
CatalogIndexInsert(irelations, Num_pg_attr_indices, attr_rel, newtuple);
|
||||
CatalogCloseIndices(Num_pg_attr_indices, irelations);
|
||||
/* keep the system catalog indices current */
|
||||
CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, irelations);
|
||||
CatalogIndexInsert(irelations, Num_pg_attr_indices, attr_rel, newtuple);
|
||||
CatalogCloseIndices(Num_pg_attr_indices, irelations);
|
||||
|
||||
/* get rid of actual default definition */
|
||||
drop_default(myrelid, attnum);
|
||||
}
|
||||
/* get rid of actual default definition */
|
||||
drop_default(myrelid, attnum);
|
||||
}
|
||||
|
||||
heap_endscan(scan);
|
||||
heap_close(attr_rel, NoLock);
|
||||
}
|
||||
heap_endscan(scan);
|
||||
heap_close(attr_rel, NoLock);
|
||||
}
|
||||
|
||||
    heap_close(rel, NoLock);
}

@@ -654,33 +655,33 @@ AlterTableAlterColumn(const char *relationName,

static void
drop_default(Oid relid, int16 attnum)
{
    ScanKeyData scankeys[2];
    HeapScanDesc scan;
    Relation attrdef_rel;
    HeapTuple tuple;

    attrdef_rel = heap_openr(AttrDefaultRelationName, AccessExclusiveLock);
    ScanKeyEntryInitialize(&scankeys[0], 0x0, Anum_pg_attrdef_adrelid, F_OIDEQ,
                           ObjectIdGetDatum(relid));
    ScanKeyEntryInitialize(&scankeys[1], 0x0, Anum_pg_attrdef_adnum, F_INT2EQ,
                           Int16GetDatum(attnum));

    scan = heap_beginscan(attrdef_rel, false, SnapshotNow, 2, scankeys);
    AssertState(scan != NULL);

    if (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
        heap_delete(attrdef_rel, &tuple->t_self, NULL);

    heap_endscan(scan);

    heap_close(attrdef_rel, NoLock);
}


#ifdef _DROP_COLUMN_HACK__
/*
 * ALTER TABLE DROP COLUMN trial implementation
 *
 *
 */

/*

@@ -690,17 +691,17 @@ typedef struct SysScanDescData
{
    Relation heap_rel;
    Relation irel;
    HeapScanDesc scan;
    IndexScanDesc iscan;
    HeapTupleData tuple;
    Buffer buffer;
} SysScanDescData, *SysScanDesc;

static void *
systable_beginscan(Relation rel, const char *indexRelname, int nkeys, ScanKey entry)
{
    bool hasindex = (rel->rd_rel->relhasindex && !IsIgnoringSystemIndexes());
    SysScanDesc sysscan;

    sysscan = (SysScanDesc) palloc(sizeof(SysScanDescData));
    sysscan->heap_rel = rel;

@@ -710,7 +711,7 @@ systable_beginscan(Relation rel, const char *indexRelname, int nkeys, ScanKey en
    sysscan->buffer = InvalidBuffer;
    if (hasindex)
    {
        sysscan->irel = index_openr((char *) indexRelname);
        sysscan->iscan = index_beginscan(sysscan->irel, false, nkeys, entry);
    }
    else

@@ -720,7 +721,7 @@ systable_beginscan(Relation rel, const char *indexRelname, int nkeys, ScanKey en
static void
systable_endscan(void *scan)
{
    SysScanDesc sysscan = (SysScanDesc) scan;

    if (sysscan->irel)
    {

@@ -736,9 +737,9 @@ systable_endscan(void *scan)
static HeapTuple
systable_getnext(void *scan)
{
    SysScanDesc sysscan = (SysScanDesc) scan;
    HeapTuple htup = (HeapTuple) NULL;
    RetrieveIndexResult indexRes;

    if (sysscan->irel)
    {

@@ -774,50 +775,55 @@ find_attribute_walker(Node *node, int attnum)
        return false;
    if (IsA(node, Var))
    {
        Var *var = (Var *) node;

        if (var->varlevelsup == 0 && var->varno == 1 &&
            var->varattno == attnum)
            return true;
    }
    return expression_tree_walker(node, find_attribute_walker, (void *) attnum);
}
static bool
find_attribute_in_node(Node *node, int attnum)
{
    return expression_tree_walker(node, find_attribute_walker, (void *) attnum);
}

/*
 * Remove/check references for the column
 */
static bool
RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)
{
    Relation indexRelation,
                rcrel;
    ScanKeyData entry;
    HeapScanDesc scan;
    void *sysscan;
    HeapTuple htup,
                indexTuple;
    Form_pg_index index;
    Form_pg_relcheck relcheck;
    Form_pg_class pgcform = (Form_pg_class) NULL;
    int i;
    bool checkok = true;

    if (!checkonly)
        pgcform = (Form_pg_class) GETSTRUCT(reltup);

    /*
     * Remove/check constraints here
     */
    ScanKeyEntryInitialize(&entry, (bits16) 0x0, Anum_pg_relcheck_rcrelid,
                           (RegProcedure) F_OIDEQ, ObjectIdGetDatum(reloid));
    rcrel = heap_openr(RelCheckRelationName, RowExclusiveLock);
    sysscan = systable_beginscan(rcrel, RelCheckIndex, 1, &entry);

    while (HeapTupleIsValid(htup = systable_getnext(sysscan)))
    {
        char *ccbin;
        Node *node;

        relcheck = (Form_pg_relcheck) GETSTRUCT(htup);
        ccbin = textout(&relcheck->rcbin);
@@ -843,15 +849,15 @@ RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)
    heap_close(rcrel, NoLock);

    /*
     * What to do with triggers/rules/views/procedues ?
     */

    /*
     * Remove/check indexes
     */
    indexRelation = heap_openr(IndexRelationName, RowExclusiveLock);
    ScanKeyEntryInitialize(&entry, 0, Anum_pg_index_indrelid, F_OIDEQ,
                           ObjectIdGetDatum(reloid));
    scan = heap_beginscan(indexRelation, false, SnapshotNow, 1, &entry);
    while (HeapTupleIsValid(indexTuple = heap_getnext(scan, 0)))
    {

@@ -870,8 +876,8 @@ RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)
        else
        {
            htup = SearchSysCacheTuple(RELOID,
                                       ObjectIdGetDatum(index->indexrelid),
                                       0, 0, 0);
            RemoveIndex(NameStr(((Form_pg_class) GETSTRUCT(htup))->relname));
        }
        break;

@@ -883,33 +889,38 @@ RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)

    return checkok;
}
#endif /* _DROP_COLUMN_HACK__ */

/*
 * ALTER TABLE DROP COLUMN
 */
void
AlterTableDropColumn(const char *relationName,
                     bool inh, const char *colName,
                     int behavior)
{
#ifdef _DROP_COLUMN_HACK__
    Relation rel,
                attrdesc,
                adrel;
    Oid myrelid,
        attoid;
    HeapTuple reltup;
    HeapTupleData classtuple;
    Buffer buffer;
    Form_pg_attribute attribute;
    HeapTuple tup;
    Relation idescs[Num_pg_attr_indices];
    int attnum;
    bool hasindex;
    char dropColname[32];
    void *sysscan;
    ScanKeyData scankeys[2];

    if (inh)
        elog(ERROR, "ALTER TABLE / DROP COLUMN with inherit option is not supported yet");

    /*
     * permissions checking. this would normally be done in utility.c,
     * but this particular routine is recursive.

@@ -925,25 +936,25 @@ AlterTableDropColumn(const char *relationName,
#endif

    /*
     * Grab an exclusive lock on the target table, which we will NOT
     * release until end of transaction.
     */
    rel = heap_openr(relationName, AccessExclusiveLock);
    myrelid = RelationGetRelid(rel);
    heap_close(rel, NoLock);    /* close rel but keep lock! */

    /*
     * What to do when rel has inheritors ?
     */
    if (length(find_all_inheritors(myrelid)) > 1)
        elog(ERROR, "ALTER TABLE: cannot drop a column on table that is inherited from");

    /*
     * lock the pg_class tuple for update
     */
    reltup = SearchSysCacheTuple(RELNAME, PointerGetDatum(relationName),
                                 0, 0, 0);

    if (!HeapTupleIsValid(reltup))
        elog(ERROR, "ALTER TABLE: relation \"%s\" not found",

@@ -976,19 +987,20 @@ AlterTableDropColumn(const char *relationName,
     * Get the target pg_attribute tuple
     */
    tup = SearchSysCacheTupleCopy(ATTNAME,
                                  ObjectIdGetDatum(reltup->t_data->t_oid),
                                  PointerGetDatum(colName), 0, 0);
    if (!HeapTupleIsValid(tup))
        elog(ERROR, "ALTER TABLE: column name \"%s\" doesn't exist in table \"%s\"",
             colName, relationName);

    attribute = (Form_pg_attribute) GETSTRUCT(tup);
    if (attribute->attnum <= 0)
        elog(ERROR, "ALTER TABLE: column name \"%s\" was already dropped", colName);
    attnum = attribute->attnum;
    attoid = tup->t_data->t_oid;

    /*
     * Check constraints/indices etc here
     */
    if (behavior != CASCADE)
    {

@@ -997,7 +1009,7 @@ AlterTableDropColumn(const char *relationName,
    }

    /*
     * change the target pg_attribute tuple
     */
    sprintf(dropColname, "*already Dropped*%d", attnum);
    namestrcpy(&(attribute->attname), dropColname);

@@ -1009,7 +1021,7 @@ AlterTableDropColumn(const char *relationName,
    {
        CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs);
        CatalogIndexInsert(idescs, Num_pg_attr_indices,
                           attrdesc, tup);
        CatalogCloseIndices(Num_pg_attr_indices, idescs);
    }
    heap_close(attrdesc, NoLock);

@@ -1020,15 +1032,17 @@ AlterTableDropColumn(const char *relationName,
    /* delete attrdef */
    adrel = heap_openr(AttrDefaultRelationName, RowExclusiveLock);
    ScanKeyEntryInitialize(&scankeys[0], 0x0, Anum_pg_attrdef_adrelid,
                           F_OIDEQ, ObjectIdGetDatum(myrelid));

    /*
     * Oops pg_attrdef doesn't have (adrelid,adnum) index
     * ScanKeyEntryInitialize(&scankeys[1], 0x0, Anum_pg_attrdef_adnum,
     * F_INT2EQ, Int16GetDatum(attnum)); sysscan =
     * systable_beginscan(adrel, AttrDefaultIndex, 2, scankeys);
     */
    sysscan = systable_beginscan(adrel, AttrDefaultIndex, 1, scankeys);
    while (HeapTupleIsValid(tup = systable_getnext(sysscan)))
    {
        if (((Form_pg_attrdef) GETSTRUCT(tup))->adnum == attnum)
        {
            heap_delete(adrel, &tup->t_self, NULL);

@@ -1037,8 +1051,9 @@ AlterTableDropColumn(const char *relationName,
    }
    systable_endscan(sysscan);
    heap_close(adrel, NoLock);

    /*
     * Remove objects which reference this column
     */
    if (behavior == CASCADE)
    {

@@ -1055,8 +1070,8 @@ AlterTableDropColumn(const char *relationName,
    heap_freetuple(reltup);
    heap_close(rel, NoLock);
#else
    elog(ERROR, "ALTER TABLE / DROP COLUMN is not implemented");
#endif /* _DROP_COLUMN_HACK__ */
}


@@ -1066,76 +1081,80 @@ AlterTableDropColumn(const char *relationName,
 */
void
AlterTableAddConstraint(const char *relationName,
                        bool inh, Node *newConstraint)
{
    if (newConstraint == NULL)
        elog(ERROR, "ALTER TABLE / ADD CONSTRAINT passed invalid constraint.");

    switch (nodeTag(newConstraint))
    {
        case T_Constraint:
            elog(ERROR, "ALTER TABLE / ADD CONSTRAINT is not implemented");
        case T_FkConstraint:
            {
                FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
                Relation rel;
                HeapScanDesc scan;
                HeapTuple tuple;
                Trigger trig;
                List *list;
                int count;

                /*
                 * Grab an exclusive lock on the pk table, so that someone
                 * doesn't delete rows out from under us.
                 */
                rel = heap_openr(fkconstraint->pktable_name, AccessExclusiveLock);
                heap_close(rel, NoLock);

                /*
                 * Grab an exclusive lock on the fk table, and then scan
                 * through each tuple, calling the RI_FKey_Match_Ins
                 * (insert trigger) as if that tuple had just been
                 * inserted. If any of those fail, it should elog(ERROR)
                 * and that's that.
                 */
                rel = heap_openr(relationName, AccessExclusiveLock);
                trig.tgoid = 0;
                trig.tgname = "<unknown>";
                trig.tgfoid = 0;
                trig.tgtype = 0;
                trig.tgenabled = TRUE;
                trig.tgisconstraint = TRUE;
                trig.tginitdeferred = FALSE;
                trig.tgdeferrable = FALSE;

                trig.tgargs = (char **) palloc(
                    sizeof(char *) * (4 + length(fkconstraint->fk_attrs)
                                      + length(fkconstraint->pk_attrs)));

                trig.tgargs[0] = "<unnamed>";
                trig.tgargs[1] = (char *) relationName;
                trig.tgargs[2] = fkconstraint->pktable_name;
                trig.tgargs[3] = fkconstraint->match_type;
                count = 4;
                foreach(list, fkconstraint->fk_attrs)
                {
                    Ident *fk_at = lfirst(list);

                    trig.tgargs[count++] = fk_at->name;
                }
                foreach(list, fkconstraint->pk_attrs)
                {
                    Ident *pk_at = lfirst(list);

                    trig.tgargs[count++] = pk_at->name;
                }
                trig.tgnargs = count;

                scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
                AssertState(scan != NULL);

                while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
                {
                    TriggerData newtrigdata;

                    newtrigdata.tg_event = TRIGGER_EVENT_INSERT | TRIGGER_EVENT_ROW;
                    newtrigdata.tg_relation = rel;
                    newtrigdata.tg_trigtuple = tuple;

@@ -1149,7 +1168,8 @@ AlterTableAddConstraint(const char *relationName,
                    /* Make a call to the check function */
                }
                heap_endscan(scan);
                heap_close(rel, NoLock);    /* close rel but keep
                                             * lock! */

                pfree(trig.tgargs);
            }

@@ -1166,10 +1186,10 @@ AlterTableAddConstraint(const char *relationName,
 */
void
AlterTableDropConstraint(const char *relationName,
                         bool inh, const char *constrName,
                         int behavior)
{
    elog(ERROR, "ALTER TABLE / DROP CONSTRAINT is not implemented");
}


@@ -1186,7 +1206,7 @@ LockTableCommand(LockStmt *lockstmt)
    int aclresult;

    rel = heap_openr(lockstmt->relname, NoLock);
    if (!RelationIsValid(rel))
        elog(ERROR, "Relation '%s' does not exist", lockstmt->relname);

    if (lockstmt->mode == AccessShareLock)
File diff suppressed because it is too large
@@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
 *    $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.103 2000/03/23 21:38:58 momjian Exp $
 *    $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.104 2000/04/12 17:14:58 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -64,8 +64,8 @@ static int CountTuples(Relation relation);
 * Static communication variables ... pretty grotty, but COPY has
 * never been reentrant...
 */
int lineno = 0;             /* used by elog() -- dz */
static bool fe_eof;

/*
 * These static variables are used to avoid incurring overhead for each

@@ -76,9 +76,11 @@ static bool fe_eof;
 * to attribute_buf's data buffer!
 * encoding, if needed, can be set once at the start of the copy operation.
 */
static StringInfoData attribute_buf;

#ifdef MULTIBYTE
static int encoding;

#endif


@@ -113,11 +115,11 @@ CopySendData(void *databuf, int datasize, FILE *fp)
        fe_eof = true;
    }
    else
    {
        fwrite(databuf, datasize, 1, fp);
        if (ferror(fp))
            elog(ERROR, "CopySendData: %s", strerror(errno));
    }
}

static void

@@ -194,7 +196,8 @@ CopyPeekChar(FILE *fp)
{
    if (!fp)
    {
        int ch = pq_peekbyte();

        if (ch == EOF)
            fe_eof = true;
        return ch;

@@ -280,15 +283,15 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
     * Open and lock the relation, using the appropriate lock type.
     *
     * Note: AccessExclusive is probably overkill for copying to a relation,
     * but that's what the code grabs on the rel's indices. If this lock
     * is relaxed then I think the index locks need relaxed also.
     */
    rel = heap_openr(relname, (from ? AccessExclusiveLock : AccessShareLock));

    result = pg_aclcheck(relname, UserName, required_access);
    if (result != ACLCHECK_OK)
        elog(ERROR, "%s: %s", relname, aclcheck_error_strings[result]);
    if (!pipe && !superuser())
        elog(ERROR, "You must have Postgres superuser privilege to do a COPY "
             "directly to or from a file. Anyone can COPY to stdout or "
             "from stdin. Psql's \\copy command also works for anyone.");

@@ -345,13 +348,13 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
    }
    else
    {
        mode_t oumask;      /* Pre-existing umask value */

        oumask = umask((mode_t) 022);

        if (*filename != '/')
            elog(ERROR, "Relative path not allowed for server side"
                 " COPY command.");

#ifndef __CYGWIN32__
        fp = AllocateFile(filename, "w");

@@ -369,9 +372,7 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
    }

    if (!pipe)
    {
        FreeFile(fp);
    }
    else if (!from)
    {
        if (!binary)

@@ -382,9 +383,10 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
    pfree(attribute_buf.data);

    /*
     * Close the relation. If reading, we can release the AccessShareLock
     * we got; if writing, we should hold the lock until end of
     * transaction to ensure that updates will be committed before lock is
     * released.
     */
    heap_close(rel, (from ? NoLock : AccessShareLock));
}
@@ -399,9 +401,11 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null_p

    int32 attr_count,
            i;

#ifdef _DROP_COLUMN_HACK__
    bool *valid;

#endif /* _DROP_COLUMN_HACK__ */
    Form_pg_attribute *attr;
    FmgrInfo *out_functions;
    Oid out_func_oid;

@@ -435,7 +439,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null_p
    typmod = (int32 *) palloc(attr_count * sizeof(int32));
#ifdef _DROP_COLUMN_HACK__
    valid = (bool *) palloc(attr_count * sizeof(bool));
#endif /* _DROP_COLUMN_HACK__ */
    for (i = 0; i < attr_count; i++)
    {
#ifdef _DROP_COLUMN_HACK__

@@ -446,7 +450,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null_p
        }
        else
            valid[i] = true;
#endif /* _DROP_COLUMN_HACK__ */
        out_func_oid = (Oid) GetOutputFunction(attr[i]->atttypid);
        fmgr_info(out_func_oid, &out_functions[i]);
        elements[i] = GetTypeElement(attr[i]->atttypid);

@@ -493,7 +497,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null_p
                CopySendChar('\n', fp);
                continue;
            }
#endif /* _DROP_COLUMN_HACK__ */
            if (!isnull)
            {
                string = (char *) (*fmgr_faddr(&out_functions[i]))

@@ -502,7 +506,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null_p
                pfree(string);
            }
            else
                CopySendString(null_print, fp); /* null indicator */

            if (i == attr_count - 1)
                CopySendChar('\n', fp);

@@ -723,7 +727,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null
#ifdef _DROP_COLUMN_HACK__
        if (COLUMN_IS_DROPPED(attr[i]))
            continue;
#endif /* _DROP_COLUMN_HACK__ */
        in_func_oid = (Oid) GetInputFunction(attr[i]->atttypid);
        fmgr_info(in_func_oid, &in_functions[i]);
        elements[i] = GetTypeElement(attr[i]->atttypid);

@@ -756,7 +760,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null
            byval[i] = 'n';
            continue;
        }
#endif /* _DROP_COLUMN_HACK__ */
        byval[i] = (bool) IsTypeByVal(attr[i]->atttypid);
    }


@@ -765,7 +769,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null

    while (!done)
    {
        if (QueryCancel)
        {
            lineno = 0;
            CancelQuery();
        }

@@ -796,7 +801,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null
                nulls[i] = 'n';
                continue;
            }
#endif /* _DROP_COLUMN_HACK__ */
            string = CopyReadAttribute(fp, &isnull, delim, &newline, null_print);
            if (isnull)
            {

@@ -937,7 +942,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null
                     */
                    slot->val = tuple;
                    /* SetSlotContents(slot, tuple); */
                    if (!ExecQual((List *) indexPred[i], econtext, false))
                        continue;
#endif /* OMIT_PARTIAL_INDEX */
                }

@@ -1189,6 +1194,7 @@ static char *
CopyReadAttribute(FILE *fp, bool *isnull, char *delim, int *newline, char *null_print)
{
    int c;

#ifdef MULTIBYTE
    int mblen;
    unsigned char s[2];

@@ -1222,9 +1228,7 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim, int *newline, char *null_
            break;
        }
        if (strchr(delim, c))
        {
            break;
        }
        if (c == '\\')
        {
            c = CopyGetChar(fp);

@@ -1272,13 +1276,16 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim, int *newline, char *null_
                    c = val & 0377;
                }
                break;

                /*
                 * This is a special hack to parse `\N' as
                 * <backslash-N> rather then just 'N' to provide
                 * compatibility with the default NULL output. -- pe
                 */
            case 'N':
                appendStringInfoCharMacro(&attribute_buf, '\\');
                c = 'N';
                break;
            case 'b':
                c = '\b';
                break;

@@ -1332,8 +1339,8 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim, int *newline, char *null_
    }
#endif

    if (strcmp(attribute_buf.data, null_print) == 0)
        *isnull = true;

    return attribute_buf.data;


@@ -1346,10 +1353,12 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
{
    char *string;
    char c;

#ifdef MULTIBYTE
    char *string_start;
    int mblen;
    int i;

#endif

#ifdef MULTIBYTE
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 *    $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.56 2000/01/29 16:58:34 petere Exp $
 *    $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.57 2000/04/12 17:14:58 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -32,7 +32,7 @@
 */

static bool checkAttrExists(const char *attributeName,
                const char *attributeType, List *schema);
static List *MergeAttributes(List *schema, List *supers, List **supconstr);
static void StoreCatalogInheritance(Oid relationId, List *supers);


@@ -145,14 +145,14 @@ DefineRelation(CreateStmt *stmt, char relkind)
    StoreCatalogInheritance(relationId, inheritList);

    /*
     * Now add any newly specified column default values and CHECK
     * constraints to the new relation. These are passed to us in the
     * form of raw parsetrees; we need to transform them to executable
     * expression trees before they can be added. The most convenient way
     * to do that is to apply the parser's transformExpr routine, but
     * transformExpr doesn't work unless we have a pre-existing relation.
     * So, the transformation has to be postponed to this final step of
     * CREATE TABLE.
     *
     * First, scan schema to find new column defaults.
     */

@@ -181,21 +181,24 @@ DefineRelation(CreateStmt *stmt, char relkind)
        return;

    /*
     * We must bump the command counter to make the newly-created relation
     * tuple visible for opening.
     */
    CommandCounterIncrement();

    /*
     * Open the new relation.
     */
    rel = heap_openr(relname, AccessExclusiveLock);

    /*
     * Parse and add the defaults/constraints.
     */
    AddRelationRawConstraints(rel, rawDefaults, stmt->constraints);

    /*
     * Clean up. We keep lock on new relation (although it shouldn't be
     * visible to anyone else anyway, until commit).
     */
    heap_close(rel, NoLock);
}

@@ -220,13 +223,13 @@ RemoveRelation(char *name)

/*
 * TruncateRelation --
 *        Removes all the rows from a relation
 *
 * Exceptions:
 *        BadArg if name is invalid
 *
 * Note:
 *        Rows are removed, indices are truncated and reconstructed.
 */
void
TruncateRelation(char *name)

@@ -284,6 +287,7 @@ MergeAttributes(List *schema, List *supers, List **supconstr)

        foreach(rest, lnext(entry))
        {

            /*
             * check for duplicated names within the new relation
             */

@@ -352,11 +356,12 @@ MergeAttributes(List *schema, List *supers, List **supconstr)
         * check validity
         *
         */
        if (checkAttrExists(attributeName, attributeType, schema))
            elog(ERROR, "CREATE TABLE: attribute \"%s\" already exists in inherited schema",
                 attributeName);

        if (checkAttrExists(attributeName, attributeType, inhSchema))

            /*
             * this entry already exists
             */

@@ -499,7 +504,7 @@ StoreCatalogInheritance(Oid relationId, List *supers)
    if (RelationGetForm(relation)->relhasindex)
    {
        Relation idescs[Num_pg_inherits_indices];

        CatalogOpenIndices(Num_pg_inherits_indices, Name_pg_inherits_indices, idescs);
        CatalogIndexInsert(idescs, Num_pg_inherits_indices, relation, tuple);
        CatalogCloseIndices(Num_pg_inherits_indices, idescs);

@@ -642,8 +647,9 @@ checkAttrExists(const char *attributeName, const char *attributeType, List *sche
    {
        ColumnDef *def = lfirst(s);

        if (strcmp(attributeName, def->colname) == 0)
        {

            /*
             * attribute exists. Make sure the types are the same.
             */
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 *    $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.52 2000/03/26 18:32:28 petere Exp $
 *    $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.53 2000/04/12 17:14:58 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -33,9 +33,9 @@
#include "catalog/pg_shadow.h"
#include "commands/comment.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"     /* for DropBuffers */
#include "storage/fd.h"         /* for closeAllVfds */
#include "storage/sinval.h"     /* for DatabaseHasActiveBackends */
#include "utils/builtins.h"
#include "utils/elog.h"
#include "utils/palloc.h"

@@ -45,10 +45,10 @@

/* non-export function prototypes */
static bool
get_user_info(const char *name, int4 *use_sysid, bool *use_super, bool *use_createdb);

static bool
get_db_info(const char *name, char *dbpath, Oid *dbIdP, int4 *ownerIdP);



@@ -61,99 +61,104 @@ createdb(const char *dbname, const char *dbpath, int encoding)
{
    char buf[2 * MAXPGPATH + 100];
    char *loc;
    char locbuf[512];
    int4 user_id;
    bool use_super,
         use_createdb;
    Relation pg_database_rel;
    HeapTuple tuple;
    TupleDesc pg_database_dsc;
    Datum new_record[Natts_pg_database];
    char new_record_nulls[Natts_pg_database] = {' ', ' ', ' ', ' '};

    if (!get_user_info(GetPgUserName(), &user_id, &use_super, &use_createdb))
        elog(ERROR, "current user name is invalid");

    if (!use_createdb && !use_super)
        elog(ERROR, "CREATE DATABASE: permission denied");

    if (get_db_info(dbname, NULL, NULL, NULL))
        elog(ERROR, "CREATE DATABASE: database \"%s\" already exists", dbname);

    /* don't call this in a transaction block */
    if (IsTransactionBlock())
        elog(ERROR, "CREATE DATABASE: may not be called in a transaction block");

    /* Generate directory name for the new database */
    if (dbpath == NULL || strcmp(dbpath, dbname) == 0)
        strcpy(locbuf, dbname);
    else
        snprintf(locbuf, sizeof(locbuf), "%s/%s", dbpath, dbname);

    loc = ExpandDatabasePath(locbuf);

    if (loc == NULL)
        elog(ERROR,
             "The database path '%s' is invalid. "
             "This may be due to a character that is not allowed or because the chosen "
             "path isn't permitted for databases", dbpath);

    /*
     * close virtual file descriptors so the kernel has more available for
     * the system() calls
     */
    closeAllVfds();

    /*
     * Insert a new tuple into pg_database
     */
    pg_database_rel = heap_openr(DatabaseRelationName, AccessExclusiveLock);
    pg_database_dsc = RelationGetDescr(pg_database_rel);

    /* Form tuple */
    new_record[Anum_pg_database_datname - 1] = NameGetDatum(namein(dbname));
    new_record[Anum_pg_database_datdba - 1] = Int32GetDatum(user_id);
    new_record[Anum_pg_database_encoding - 1] = Int32GetDatum(encoding);
    new_record[Anum_pg_database_datpath - 1] = PointerGetDatum(textin(locbuf));

    tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls);

    /*
     * Update table
     */
    heap_insert(pg_database_rel, tuple);

    /*
     * Update indexes (there aren't any currently)
     */
#ifdef Num_pg_database_indices
    if (RelationGetForm(pg_database_rel)->relhasindex)
    {
        Relation idescs[Num_pg_database_indices];

        CatalogOpenIndices(Num_pg_database_indices,
                           Name_pg_database_indices, idescs);
        CatalogIndexInsert(idescs, Num_pg_database_indices, pg_database_rel,
                           tuple);
        CatalogCloseIndices(Num_pg_database_indices, idescs);
    }
#endif

    heap_close(pg_database_rel, NoLock);

    /* Copy the template database to the new location */

    if (mkdir(loc, S_IRWXU) != 0)
        elog(ERROR, "CREATE DATABASE: unable to create database directory '%s': %s", loc, strerror(errno));

    snprintf(buf, sizeof(buf), "cp %s%cbase%ctemplate1%c* '%s'",
             DataDir, SEP_CHAR, SEP_CHAR, SEP_CHAR, loc);
    if (system(buf) != 0)
    {
        int ret;

        snprintf(buf, sizeof(buf), "rm -rf '%s'", loc);
        ret = system(buf);
        if (ret == 0)
            elog(ERROR, "CREATE DATABASE: could not initialize database directory");
        else
            elog(ERROR, "CREATE DATABASE: Could not initialize database directory. Delete failed as well");
    }
}

@@ -165,18 +170,19 @@ createdb(const char *dbname, const char *dbpath, int encoding)
void
dropdb(const char *dbname)
{
    int4 user_id,
         db_owner;
    bool use_super;
    Oid db_id;
    char *path,
         dbpath[MAXPGPATH],
         buf[MAXPGPATH + 100];
    Relation pgdbrel;
    HeapScanDesc pgdbscan;
    ScanKeyData key;
    HeapTuple tup;

    AssertArg(dbname);

    if (strcmp(dbname, "template1") == 0)
        elog(ERROR, "DROP DATABASE: May not be executed on the template1 database");

@@ -185,46 +191,49 @@ dropdb(const char *dbname)
        elog(ERROR, "DROP DATABASE: Cannot be executed on the currently open database");

    if (IsTransactionBlock())
        elog(ERROR, "DROP DATABASE: May not be called in a transaction block");

    if (!get_user_info(GetPgUserName(), &user_id, &use_super, NULL))
        elog(ERROR, "Current user name is invalid");

    if (!get_db_info(dbname, dbpath, &db_id, &db_owner))
        elog(ERROR, "DROP DATABASE: Database \"%s\" does not exist", dbname);

    if (user_id != db_owner && !use_super)
        elog(ERROR, "DROP DATABASE: Permission denied");

    path = ExpandDatabasePath(dbpath);
    if (path == NULL)
        elog(ERROR,
             "The database path '%s' is invalid. "
             "This may be due to a character that is not allowed or because the chosen "
             "path isn't permitted for databases", path);

    /*
     * close virtual file descriptors so the kernel has more available for
     * the system() calls
     */
    closeAllVfds();

    /*
     * Obtain exclusive lock on pg_database. We need this to ensure that
     * no new backend starts up in the target database while we are
     * deleting it. (Actually, a new backend might still manage to start
     * up, because it will read pg_database without any locking to
     * discover the database's OID. But it will detect its error in
     * ReverifyMyDatabase and shut down before any serious damage is done.
     * See postinit.c.)
     */
    pgdbrel = heap_openr(DatabaseRelationName, AccessExclusiveLock);

    /*
     * Check for active backends in the target database.
     */
    if (DatabaseHasActiveBackends(db_id))
    {
        heap_close(pgdbrel, AccessExclusiveLock);
        elog(ERROR, "DROP DATABASE: Database \"%s\" is being accessed by other users", dbname);
    }

    /*
     * Find the database's tuple by OID (should be unique, we trust).

@@ -238,8 +247,11 @@ dropdb(const char *dbname)
    if (!HeapTupleIsValid(tup))
    {
        heap_close(pgdbrel, AccessExclusiveLock);

        /*
         * This error should never come up since the existence of the
         * database is checked earlier
         */
        elog(ERROR, "DROP DATABASE: Database \"%s\" doesn't exist despite earlier reports to the contrary",
             dbname);
    }

@@ -270,7 +282,7 @@ dropdb(const char *dbname)
     */
    snprintf(buf, sizeof(buf), "rm -rf '%s'", path);
    if (system(buf) != 0)
        elog(NOTICE, "DROP DATABASE: The database directory '%s' could not be removed", path);
}


@@ -285,11 +297,11 @@ get_db_info(const char *name, char *dbpath, Oid *dbIdP, int4 *ownerIdP)
    Relation relation;
    HeapTuple tuple;
    ScanKeyData scanKey;
    HeapScanDesc scan;

    AssertArg(name);

    relation = heap_openr(DatabaseRelationName, AccessExclusiveLock /* ??? */ );

    ScanKeyEntryInitialize(&scanKey, 0, Anum_pg_database_datname,
                           F_NAMEEQ, NameGetDatum(name));
|
@ -302,76 +314,76 @@ get_db_info(const char *name, char *dbpath, Oid *dbIdP, int4 *ownerIdP)
|
|||
|
||||
if (HeapTupleIsValid(tuple))
|
||||
{
|
||||
text *tmptext;
|
||||
bool isnull;
|
||||
|
||||
/* oid of the database */
|
||||
if (dbIdP)
|
||||
*dbIdP = tuple->t_data->t_oid;
|
||||
/* uid of the owner */
|
||||
if (ownerIdP)
|
||||
{
|
||||
*ownerIdP = (int4) heap_getattr(tuple,
|
||||
Anum_pg_database_datdba,
|
||||
RelationGetDescr(relation),
|
||||
&isnull);
|
||||
if (isnull)
|
||||
*ownerIdP = -1; /* hopefully no one has that id already ;) */
|
||||
}
|
||||
/* database path (as registered in pg_database) */
|
||||
if (dbpath)
|
||||
{
|
||||
tmptext = (text *) heap_getattr(tuple,
|
||||
Anum_pg_database_datpath,
|
||||
RelationGetDescr(relation),
|
||||
&isnull);
|
||||
text *tmptext;
|
||||
bool isnull;
|
||||
|
||||
if (!isnull)
|
||||
{
|
||||
Assert(VARSIZE(tmptext) - VARHDRSZ < MAXPGPATH);
|
||||
/* oid of the database */
|
||||
if (dbIdP)
|
||||
*dbIdP = tuple->t_data->t_oid;
|
||||
/* uid of the owner */
|
||||
if (ownerIdP)
|
||||
{
|
||||
*ownerIdP = (int4) heap_getattr(tuple,
|
||||
Anum_pg_database_datdba,
|
||||
RelationGetDescr(relation),
|
||||
&isnull);
|
||||
if (isnull)
|
||||
*ownerIdP = -1; /* hopefully no one has that id already ;) */
|
||||
}
|
||||
/* database path (as registered in pg_database) */
|
||||
if (dbpath)
|
||||
{
|
||||
tmptext = (text *) heap_getattr(tuple,
|
||||
Anum_pg_database_datpath,
|
||||
RelationGetDescr(relation),
|
||||
&isnull);
|
||||
|
||||
strncpy(dbpath, VARDATA(tmptext), VARSIZE(tmptext) - VARHDRSZ);
|
||||
*(dbpath + VARSIZE(tmptext) - VARHDRSZ) = '\0';
|
||||
}
|
||||
else
|
||||
strcpy(dbpath, "");
|
||||
}
|
||||
if (!isnull)
|
||||
{
|
||||
Assert(VARSIZE(tmptext) - VARHDRSZ < MAXPGPATH);
|
||||
|
||||
strncpy(dbpath, VARDATA(tmptext), VARSIZE(tmptext) - VARHDRSZ);
|
||||
*(dbpath + VARSIZE(tmptext) - VARHDRSZ) = '\0';
|
||||
}
|
||||
else
|
||||
strcpy(dbpath, "");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (dbIdP)
|
||||
*dbIdP = InvalidOid;
|
||||
}
|
||||
{
|
||||
if (dbIdP)
|
||||
*dbIdP = InvalidOid;
|
||||
}
|
||||
|
||||
heap_endscan(scan);
|
||||
|
||||
/* We will keep the lock on the relation until end of transaction. */
|
||||
heap_close(relation, NoLock);
|
||||
|
||||
return HeapTupleIsValid(tuple);
|
||||
return HeapTupleIsValid(tuple);
|
||||
}
|
||||
|
||||
|
||||
|
||||
static bool
|
||||
get_user_info(const char * name, int4 *use_sysid, bool *use_super, bool *use_createdb)
|
||||
get_user_info(const char *name, int4 *use_sysid, bool *use_super, bool *use_createdb)
|
||||
{
|
||||
HeapTuple utup;
|
||||
HeapTuple utup;
|
||||
|
||||
AssertArg(name);
|
||||
utup = SearchSysCacheTuple(SHADOWNAME,
|
||||
PointerGetDatum(name),
|
||||
0, 0, 0);
|
||||
|
||||
if (!HeapTupleIsValid(utup))
|
||||
return false;
|
||||
if (!HeapTupleIsValid(utup))
|
||||
return false;
|
||||
|
||||
if (use_sysid)
|
||||
*use_sysid = ((Form_pg_shadow) GETSTRUCT(utup))->usesysid;
|
||||
if (use_super)
|
||||
*use_super = ((Form_pg_shadow) GETSTRUCT(utup))->usesuper;
|
||||
if (use_createdb)
|
||||
*use_createdb = ((Form_pg_shadow) GETSTRUCT(utup))->usecreatedb;
|
||||
if (use_sysid)
|
||||
*use_sysid = ((Form_pg_shadow) GETSTRUCT(utup))->usesysid;
|
||||
if (use_super)
|
||||
*use_super = ((Form_pg_shadow) GETSTRUCT(utup))->usesuper;
|
||||
if (use_createdb)
|
||||
*use_createdb = ((Form_pg_shadow) GETSTRUCT(utup))->usecreatedb;
|
||||
|
||||
return true;
|
||||
return true;
|
||||
}
|
||||
|
|
|
@@ -10,7 +10,7 @@
 *
 *
 * IDENTIFICATION
 *    $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.39 2000/04/07 13:39:24 thomas Exp $
 *    $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.40 2000/04/12 17:14:58 momjian Exp $
 *
 * DESCRIPTION
 *    The "DefineFoo" routines take the parse tree and pick out the

@@ -137,12 +137,13 @@ compute_full_attributes(List *parameters, int32 *byte_pct_p,

    foreach(pl, parameters)
    {
        DefElem *param = (DefElem *) lfirst(pl);

        if (strcasecmp(param->defname, "iscachable") == 0)
            *canCache_p = true;
        else if (strcasecmp(param->defname, "trusted") == 0)
        {

            /*
             * we don't have untrusted functions any more. The 4.2
             * implementation is lousy anyway so I took it out. -ay 10/94

@@ -233,12 +234,14 @@ CreateFunction(ProcedureStmt *stmt, CommandDest dest)
     */

    bool returnsSet;

    /* The function returns a set of values, as opposed to a singleton. */

    bool lanisPL = false;

    /*
     * The following are optional user-supplied attributes of the
     * function.
     */
    int32 byte_pct,
            perbyte_cpu,

@@ -256,7 +259,7 @@ CreateFunction(ProcedureStmt *stmt, CommandDest dest)
        elog(ERROR,
             "Only users with Postgres superuser privilege are "
             "permitted to create a function "
             "in the '%s' language. Others may use the 'sql' language "
             "or the created procedural languages.",
             languageName);
    }

@@ -316,17 +319,17 @@ CreateFunction(ProcedureStmt *stmt, CommandDest dest)
    interpret_AS_clause(languageName, stmt->as, &prosrc_str, &probin_str);

    /*
     * And now that we have all the parameters, and know we're permitted
     * to do so, go ahead and create the function.
     */
    ProcedureCreate(stmt->funcname,
                    returnsSet,
                    prorettype,
                    languageName,
                    prosrc_str, /* converted to text later */
                    probin_str, /* converted to text later */
                    canCache,
                    true,       /* (obsolete "trusted") */
                    byte_pct,
                    perbyte_cpu,
                    percall_cpu,

@@ -378,7 +381,7 @@ DefineOperator(char *oprName,
        if (!strcasecmp(defel->defname, "leftarg"))
        {
            if ((nodeTag(defel->arg) == T_TypeName)
                && (((TypeName *) defel->arg)->setof))
                elog(ERROR, "setof type not implemented for leftarg");

            typeName1 = defGetString(defel);

@@ -388,7 +391,7 @@ DefineOperator(char *oprName,
        else if (!strcasecmp(defel->defname, "rightarg"))
        {
            if ((nodeTag(defel->arg) == T_TypeName)
                && (((TypeName *) defel->arg)->setof))
                elog(ERROR, "setof type not implemented for rightarg");

            typeName2 = defGetString(defel);

@@ -698,16 +701,16 @@ DefineType(char *typeName, List *parameters)
static char *
defGetString(DefElem *def)
{
    char *string;

    if (nodeTag(def->arg) == T_String)
        string = strVal(def->arg);
    else if (nodeTag(def->arg) == T_TypeName)
        string = ((TypeName *) def->arg)->name;
    else
        string = NULL;
#if 0
    elog(ERROR, "Define: \"%s\" = what?", def->defname);
#endif

    return string;
|
@ -5,7 +5,7 @@
|
|||
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
|
||||
* Portions Copyright (c) 1994-5, Regents of the University of California
|
||||
*
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.55 2000/03/14 23:06:12 thomas Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.56 2000/04/12 17:14:58 momjian Exp $
|
||||
*
|
||||
*/
|
||||
|
||||
|
@ -209,7 +209,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
|
|||
switch (nodeTag(plan))
|
||||
{
|
||||
case T_IndexScan:
|
||||
if (ScanDirectionIsBackward(((IndexScan *)plan)->indxorderdir))
|
||||
if (ScanDirectionIsBackward(((IndexScan *) plan)->indxorderdir))
|
||||
appendStringInfo(str, " Backward");
|
||||
appendStringInfo(str, " using ");
|
||||
i = 0;
|
||||
|
@ -219,7 +219,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
|
|||
Assert(relation);
|
||||
appendStringInfo(str, "%s%s",
|
||||
(++i > 1) ? ", " : "",
|
||||
stringStringInfo(RelationGetRelationName(relation)));
|
||||
stringStringInfo(RelationGetRelationName(relation)));
|
||||
/* drop relcache refcount from RelationIdGetRelation */
|
||||
RelationDecrementReferenceCount(relation);
|
||||
}
|
||||
|
@ -238,17 +238,17 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
|
|||
|| (length(rte->ref->attrs) > 0))
|
||||
{
|
||||
appendStringInfo(str, " %s",
|
||||
stringStringInfo(rte->ref->relname));
|
||||
stringStringInfo(rte->ref->relname));
|
||||
|
||||
if (length(rte->ref->attrs) > 0)
|
||||
{
|
||||
List *c;
|
||||
int firstEntry = true;
|
||||
List *c;
|
||||
int firstEntry = true;
|
||||
|
||||
appendStringInfo(str, " (");
|
||||
foreach (c, rte->ref->attrs)
|
||||
foreach(c, rte->ref->attrs)
|
||||
{
|
||||
if (! firstEntry)
|
||||
if (!firstEntry)
|
||||
{
|
||||
appendStringInfo(str, ", ");
|
||||
firstEntry = false;
|
||||
|
|
|
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 *    $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.22 2000/02/25 02:58:48 tgl Exp $
 *    $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.23 2000/04/12 17:14:58 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -34,9 +34,9 @@
#include "parser/parse_func.h"
#include "utils/builtins.h"
#include "utils/syscache.h"
#include "miscadmin.h"          /* ReindexDatabase() */
#include "utils/portal.h"       /* ReindexDatabase() */
#include "catalog/catalog.h"    /* ReindexDatabase() */

#define IsFuncIndex(ATTR_LIST) (((IndexElem*)lfirst(ATTR_LIST))->args != NIL)

@@ -45,11 +45,11 @@ static void CheckPredicate(List *predList, List *rangeTable, Oid baseRelOid);
static void CheckPredExpr(Node *predicate, List *rangeTable, Oid baseRelOid);
static void CheckPredClause(Expr *predicate, List *rangeTable, Oid baseRelOid);
static void FuncIndexArgs(IndexElem *funcIndex, FuncIndexInfo *funcInfo,
              AttrNumber *attNumP, Oid *opOidP, Oid relId);
static void NormIndexAttrs(List *attList, AttrNumber *attNumP,
               Oid *opOidP, Oid relId);
static void ProcessAttrTypename(IndexElem *attribute,
                    Oid defType, int32 defTypmod);
static Oid GetAttrOpClass(IndexElem *attribute, Oid attrType);
static char *GetDefaultOpClass(Oid atttypid);


@@ -133,7 +133,7 @@ DefineIndex(char *heapRelationName,
     */
    foreach(pl, parameterList)
    {
        DefElem *param = (DefElem *) lfirst(pl);

        if (!strcasecmp(param->defname, "islossy"))
            lossy = TRUE;

@@ -174,7 +174,7 @@ DefineIndex(char *heapRelationName,
        namestrcpy(&fInfo.funcName, funcIndex->name);

        attributeNumberA = (AttrNumber *) palloc(nargs *
                                             sizeof attributeNumberA[0]);

        classObjectId = (Oid *) palloc(sizeof(Oid));


@@ -192,7 +192,7 @@ DefineIndex(char *heapRelationName,
    else
    {
        attributeNumberA = (AttrNumber *) palloc(numberOfAttributes *
                                             sizeof attributeNumberA[0]);

        classObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid));


@@ -490,7 +490,7 @@ NormIndexAttrs(List *attList, /* list of IndexElem's */

        atttuple = SearchSysCacheTupleCopy(ATTNAME,
                                           ObjectIdGetDatum(relId),
                                           PointerGetDatum(attribute->name),
                                           0, 0);
        if (!HeapTupleIsValid(atttuple))
            elog(ERROR, "DefineIndex: attribute \"%s\" not found",

@@ -608,7 +608,7 @@ RemoveIndex(char *name)
 * ...
 */
void
ReindexIndex(const char *name, bool force /* currently unused */ )
{
    HeapTuple tuple;


@@ -667,28 +667,35 @@ ReindexTable(const char *name, bool force)
 * "ERROR" if table nonexistent.
 * ...
 */
extern Oid MyDatabaseId;
void
ReindexDatabase(const char *dbname, bool force, bool all)
{
    Relation relation,
                relationRelation;
    HeapTuple usertuple,
                dbtuple,
                tuple;
    HeapScanDesc scan;
    int4 user_id,
         db_owner;
    bool superuser;
    Oid db_id;
    char *username;
    ScanKeyData scankey;
    PortalVariableMemory pmem;
    MemoryContext old;
    int relcnt,
        relalc,
        i,
        oncealc = 200;
    Oid *relids = (Oid *) NULL;

    AssertArg(dbname);

    username = GetPgUserName();
    usertuple = SearchSysCacheTuple(SHADOWNAME, PointerGetDatum(username),
                                    0, 0, 0);
    if (!HeapTupleIsValid(usertuple))
        elog(ERROR, "Current user '%s' is invalid.", username);
    user_id = ((Form_pg_shadow) GETSTRUCT(usertuple))->usesysid;

@@ -696,7 +703,7 @@ ReindexDatabase(const char *dbname, bool force, bool all)

    relation = heap_openr(DatabaseRelationName, AccessShareLock);
    ScanKeyEntryInitialize(&scankey, 0, Anum_pg_database_datname,
                           F_NAMEEQ, NameGetDatum(dbname));
    scan = heap_beginscan(relation, 0, SnapshotNow, 1, &scankey);
    dbtuple = heap_getnext(scan, 0);
    if (!HeapTupleIsValid(dbtuple))
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.45 2000/01/26 05:56:13 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.46 2000/04/12 17:14:59 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -156,14 +156,15 @@ SingleOpOperatorRemove(Oid typeOid)
    {
        key[0].sk_attno = attnums[i];
        scan = heap_beginscan(rel, 0, SnapshotNow, 1, key);
-        while (HeapTupleIsValid(tup = heap_getnext(scan, 0))) {
+        while (HeapTupleIsValid(tup = heap_getnext(scan, 0)))
+        {
            /*** This is apparently a routine not in use, but remove ***/
            /*** any comments anyways ***/
            DeleteComments(tup->t_data->t_oid);

            heap_delete(rel, &tup->t_self, NULL);
        }

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.41 2000/01/26 05:56:13 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.42 2000/04/12 17:14:59 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -76,12 +76,12 @@ renameatt(char *relname,
#endif

    /*
     * Grab an exclusive lock on the target table, which we will NOT
     * release until end of transaction.
     */
    targetrelation = heap_openr(relname, AccessExclusiveLock);
    relid = RelationGetRelid(targetrelation);
    heap_close(targetrelation, NoLock); /* close rel but keep lock! */

    /*
     * if the 'recurse' flag is set then we are supposed to rename this

@@ -160,11 +160,12 @@ renameatt(char *relname,
    /* keep system catalog indices current */
    {
        Relation    irelations[Num_pg_attr_indices];

        CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, irelations);
        CatalogIndexInsert(irelations, Num_pg_attr_indices, attrelation, oldatttup);
        CatalogCloseIndices(Num_pg_attr_indices, irelations);
    }

    heap_freetuple(oldatttup);
    heap_close(attrelation, RowExclusiveLock);
}

@@ -194,8 +195,8 @@ renamerel(const char *oldrelname, const char *newrelname)
             newrelname);

    /*
     * Grab an exclusive lock on the target table, which we will NOT
     * release until end of transaction.
     */
    targetrelation = heap_openr(oldrelname, AccessExclusiveLock);

@@ -211,14 +212,15 @@ renamerel(const char *oldrelname, const char *newrelname)
     * they don't exist anyway.  So, no warning in that case.
     * ----------
     */
-    if (IsTransactionBlock() && ! targetrelation->rd_myxactonly)
+    if (IsTransactionBlock() && !targetrelation->rd_myxactonly)
        elog(NOTICE, "Caution: RENAME TABLE cannot be rolled back, so don't abort now");

    /*
     * Flush all blocks of the relation out of the buffer pool.  We need
     * this because the blocks are marked with the relation's name as well
     * as OID.  If some backend tries to write a dirty buffer with
     * mdblindwrt after we've renamed the physical file, we'll be in big
     * trouble.
     *
     * Since we hold the exclusive lock on the relation, we don't have to
     * worry about more blocks being read in while we finish the rename.

@@ -227,8 +229,8 @@ renamerel(const char *oldrelname, const char *newrelname)
        elog(ERROR, "renamerel: unable to flush relation from buffer pool");

    /*
     * Make sure smgr and lower levels close the relation's files.  (Next
     * access to rel will reopen them.)
     *
     * Note: we rely on shared cache invalidation message to make other
     * backends close and re-open the files.

@@ -238,14 +240,15 @@ renamerel(const char *oldrelname, const char *newrelname)
    /*
     * Close rel, but keep exclusive lock!
     *
     * Note: we don't do anything about updating the relcache entry; we
     * assume it will be flushed by shared cache invalidate.  XXX is this
     * good enough?  What if relation is myxactonly?
     */
    heap_close(targetrelation, NoLock);

    /*
     * Find relation's pg_class tuple, and make sure newrelname isn't in
     * use.
     */
    relrelation = heap_openr(RelationRelationName, RowExclusiveLock);

@@ -262,8 +265,8 @@ renamerel(const char *oldrelname, const char *newrelname)
     * Perform physical rename of files.  If this fails, we haven't yet
     * done anything irreversible.
     *
     * XXX smgr.c ought to provide an interface for this; doing it directly
     * is bletcherous.
     */
    strcpy(oldpath, relpath(oldrelname));
    strcpy(newpath, relpath(newrelname));

@@ -410,7 +410,9 @@ init_sequence(char *caller, char *name)
    if (elm != (SeqTable) NULL)
    {
-        /* We are using a seqtable entry left over from a previous xact;
+        /*
+         * We are using a seqtable entry left over from a previous xact;
         * must check for relid change.
         */
        elm->rel = seqrel;

@@ -424,7 +426,9 @@ init_sequence(char *caller, char *name)
    }
    else
    {
-        /* Time to make a new seqtable entry.  These entries live as long
+        /*
+         * Time to make a new seqtable entry.  These entries live as long
         * as the backend does, so we use plain malloc for them.
         */
        elm = (SeqTable) malloc(sizeof(SeqTableData));

@@ -7,7 +7,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.62 2000/02/29 12:28:24 wieck Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.63 2000/04/12 17:14:59 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -89,7 +89,7 @@ CreateTrigger(CreateTrigStmt *stmt)
        rel = heap_openr(stmt->constrrelname, NoLock);
        if (rel == NULL)
            elog(ERROR, "table \"%s\" does not exist",
                 stmt->constrrelname);
        constrrelid = rel->rd_id;
        heap_close(rel, NoLock);
    }

@@ -182,12 +182,12 @@ CreateTrigger(CreateTrigStmt *stmt)
    values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(tuple->t_data->t_oid);
    values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
    values[Anum_pg_trigger_tgenabled - 1] = true;
    values[Anum_pg_trigger_tgisconstraint - 1] = stmt->isconstraint;
    values[Anum_pg_trigger_tgconstrname - 1] = PointerGetDatum(constrname);;
    values[Anum_pg_trigger_tgconstrrelid - 1] = constrrelid;
    values[Anum_pg_trigger_tgdeferrable - 1] = stmt->deferrable;
    values[Anum_pg_trigger_tginitdeferred - 1] = stmt->initdeferred;

    if (stmt->args)
    {

@@ -261,10 +261,11 @@ CreateTrigger(CreateTrigStmt *stmt)
    CatalogCloseIndices(Num_pg_class_indices, ridescs);
    heap_freetuple(tuple);
    heap_close(pgrel, RowExclusiveLock);

    /*
     * We used to try to update the rel's relcache entry here, but that's
     * fairly pointless since it will happen as a byproduct of the
     * upcoming CommandCounterIncrement...
     */
    /* Keep lock on target rel until end of xact */
    heap_close(rel, NoLock);

@@ -301,12 +302,12 @@ DropTrigger(DropTrigStmt *stmt)
        if (namestrcmp(&(pg_trigger->tgname), stmt->trigname) == 0)
        {
            /*** Delete any comments associated with this trigger ***/
            DeleteComments(tuple->t_data->t_oid);

            heap_delete(tgrel, &tuple->t_self, NULL);
            tgfound++;
        }
        else

@@ -337,10 +338,11 @@ DropTrigger(DropTrigStmt *stmt)
    CatalogCloseIndices(Num_pg_class_indices, ridescs);
    heap_freetuple(tuple);
    heap_close(pgrel, RowExclusiveLock);

    /*
     * We used to try to update the rel's relcache entry here, but that's
     * fairly pointless since it will happen as a byproduct of the
     * upcoming CommandCounterIncrement...
     */
    /* Keep lock on target rel until end of xact */
    heap_close(rel, NoLock);

@@ -360,13 +362,14 @@ RelationRemoveTriggers(Relation rel)
    tgscan = heap_beginscan(tgrel, 0, SnapshotNow, 1, &key);

-    while (HeapTupleIsValid(tup = heap_getnext(tgscan, 0))) {
+    while (HeapTupleIsValid(tup = heap_getnext(tgscan, 0)))
+    {
        /*** Delete any comments associated with this trigger ***/
        DeleteComments(tup->t_data->t_oid);

        heap_delete(tgrel, &tup->t_self, NULL);
    }

@@ -385,14 +388,14 @@ RelationRemoveTriggers(Relation rel)
     * ----------
     */
    ScanKeyEntryInitialize(&key, 0, Anum_pg_trigger_tgconstrrelid,
                           F_OIDEQ, RelationGetRelid(rel));

    tgscan = heap_beginscan(tgrel, 0, SnapshotNow, 1, &key);
    while (HeapTupleIsValid(tup = heap_getnext(tgscan, 0)))
    {
        Form_pg_trigger pg_trigger;
        Relation    refrel;
        DropTrigStmt stmt;

        pg_trigger = (Form_pg_trigger) GETSTRUCT(tup);

@@ -436,8 +439,8 @@ RelationBuildTriggers(Relation relation)
    Relation    irel = (Relation) NULL;
    ScanKeyData skey;
    HeapTupleData tuple;
    IndexScanDesc sd = (IndexScanDesc) NULL;
    HeapScanDesc tgscan = (HeapScanDesc) NULL;
    HeapTuple   htup;
    RetrieveIndexResult indexRes;
    Buffer      buffer;

@@ -684,13 +687,13 @@ FreeTriggerDesc(TriggerDesc *trigdesc)
bool
equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
{
    int         i,
                j;

    /*
     * We need not examine the "index" data, just the trigger array
     * itself; if we have the same triggers with the same types, the
     * derived index data should match.
     *
     * XXX It seems possible that the same triggers could appear in different
     * orders in the two trigger arrays; do we need to handle that?

@@ -703,8 +706,8 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
        return false;
    for (i = 0; i < trigdesc1->numtriggers; i++)
    {
        Trigger    *trig1 = trigdesc1->triggers + i;
        Trigger    *trig2 = NULL;

        /*
         * We can't assume that the triggers are always read from

@@ -1014,31 +1017,31 @@ ltrmark:;
 * end.
 * ----------
 */
static GlobalMemory deftrig_gcxt = NULL;
static GlobalMemory deftrig_cxt = NULL;

/* ----------
 * Global data that tells which triggers are actually in
 * state IMMEDIATE or DEFERRED.
 * ----------
 */
static bool deftrig_dfl_all_isset = false;
static bool deftrig_dfl_all_isdeferred = false;
static List *deftrig_dfl_trigstates = NIL;

static bool deftrig_all_isset;
static bool deftrig_all_isdeferred;
static List *deftrig_trigstates;

/* ----------
 * The list of events during the entire transaction.
 *
 * XXX This must finally be held in a file because of the huge
 * number of events that could occur in the real world.
 * ----------
 */
static int  deftrig_n_events;
static List *deftrig_events;

/* ----------

@@ -1051,9 +1054,9 @@ static List *deftrig_events;
static bool
deferredTriggerCheckState(Oid tgoid, int32 itemstate)
{
    MemoryContext oldcxt;
    List       *sl;
    DeferredTriggerStatus trigstate;

    /* ----------
     * Not deferrable triggers (i.e. normal AFTER ROW triggers

@@ -1068,7 +1071,7 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)
     * Lookup if we know an individual state for this trigger
     * ----------
     */
-    foreach (sl, deftrig_trigstates)
+    foreach(sl, deftrig_trigstates)
    {
        trigstate = (DeferredTriggerStatus) lfirst(sl);
        if (trigstate->dts_tgoid == tgoid)

@@ -1092,10 +1095,10 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)
    oldcxt = MemoryContextSwitchTo((MemoryContext) deftrig_cxt);

    trigstate = (DeferredTriggerStatus)
        palloc(sizeof(DeferredTriggerStatusData));
    trigstate->dts_tgoid = tgoid;
    trigstate->dts_tgisdeferred =
        ((itemstate & TRIGGER_DEFERRED_INITDEFERRED) != 0);
    deftrig_trigstates = lappend(deftrig_trigstates, trigstate);

    MemoryContextSwitchTo(oldcxt);

@@ -1130,8 +1133,8 @@ deferredTriggerAddEvent(DeferredTriggerEvent event)
static DeferredTriggerEvent
deferredTriggerGetPreviousEvent(Oid relid, ItemPointer ctid)
{
    DeferredTriggerEvent previous;
    int         n;

    for (n = deftrig_n_events - 1; n >= 0; n--)
    {

@@ -1143,15 +1146,15 @@ deferredTriggerGetPreviousEvent(Oid relid, ItemPointer ctid)
            continue;

        if (ItemPointerGetBlockNumber(ctid) ==
            ItemPointerGetBlockNumber(&(previous->dte_newctid)) &&
            ItemPointerGetOffsetNumber(ctid) ==
            ItemPointerGetOffsetNumber(&(previous->dte_newctid)))
            return previous;
    }

    elog(ERROR,
     "deferredTriggerGetPreviousEvent(): event for tuple %s not found",
         tidout(ctid));
    return NULL;
}

@@ -1166,13 +1169,13 @@ deferredTriggerGetPreviousEvent(Oid relid, ItemPointer ctid)
static void
deferredTriggerExecute(DeferredTriggerEvent event, int itemno)
{
    Relation    rel;
    TriggerData SaveTriggerData;
    HeapTupleData oldtuple;
    HeapTupleData newtuple;
    HeapTuple   rettuple;
    Buffer      oldbuffer;
    Buffer      newbuffer;

    /* ----------
     * Open the heap and fetch the required OLD and NEW tuples.

@@ -1200,31 +1203,31 @@ deferredTriggerExecute(DeferredTriggerEvent event, int itemno)
     * Setup the trigger information
     * ----------
     */
    SaveTriggerData.tg_event = (event->dte_event & TRIGGER_EVENT_OPMASK) |
        TRIGGER_EVENT_ROW;
    SaveTriggerData.tg_relation = rel;

    switch (event->dte_event & TRIGGER_EVENT_OPMASK)
    {
        case TRIGGER_EVENT_INSERT:
            SaveTriggerData.tg_trigtuple = &newtuple;
            SaveTriggerData.tg_newtuple = NULL;
            SaveTriggerData.tg_trigger =
                rel->trigdesc->tg_after_row[TRIGGER_EVENT_INSERT][itemno];
            break;

        case TRIGGER_EVENT_UPDATE:
            SaveTriggerData.tg_trigtuple = &oldtuple;
            SaveTriggerData.tg_newtuple = &newtuple;
            SaveTriggerData.tg_trigger =
                rel->trigdesc->tg_after_row[TRIGGER_EVENT_UPDATE][itemno];
            break;

        case TRIGGER_EVENT_DELETE:
            SaveTriggerData.tg_trigtuple = &oldtuple;
            SaveTriggerData.tg_newtuple = NULL;
            SaveTriggerData.tg_trigger =
                rel->trigdesc->tg_after_row[TRIGGER_EVENT_DELETE][itemno];
            break;
    }

@@ -1271,11 +1274,11 @@ deferredTriggerExecute(DeferredTriggerEvent event, int itemno)
static void
deferredTriggerInvokeEvents(bool immediate_only)
{
    List       *el;
    DeferredTriggerEvent event;
    int         still_deferred_ones;
    int         eventno = -1;
    int         i;

    /* ----------
     * For now we process all events - to speedup transaction blocks

@@ -1286,7 +1289,7 @@ deferredTriggerInvokeEvents(bool immediate_only)
     * SET CONSTRAINTS ... command finishes and calls EndQuery.
     * ----------
     */
-    foreach (el, deftrig_events)
+    foreach(el, deftrig_events)
    {
        eventno++;

@@ -1315,8 +1318,8 @@ deferredTriggerInvokeEvents(bool immediate_only)
             * ----------
             */
            if (immediate_only && deferredTriggerCheckState(
                                    event->dte_item[i].dti_tgoid,
                                    event->dte_item[i].dti_state))
            {
                still_deferred_ones = true;
                continue;

@@ -1367,34 +1370,34 @@ DeferredTriggerInit(void)
void
DeferredTriggerBeginXact(void)
{
    MemoryContext oldcxt;
    List       *l;
    DeferredTriggerStatus dflstat;
    DeferredTriggerStatus stat;

    if (deftrig_cxt != NULL)
        elog(FATAL,
           "DeferredTriggerBeginXact() called while inside transaction");

    /* ----------
     * Create the per transaction memory context and copy all states
     * from the per session context to here.
     * ----------
     */
    deftrig_cxt = CreateGlobalMemory("DeferredTriggerXact");
    oldcxt = MemoryContextSwitchTo((MemoryContext) deftrig_cxt);

    deftrig_all_isset = deftrig_dfl_all_isset;
    deftrig_all_isdeferred = deftrig_dfl_all_isdeferred;

    deftrig_trigstates = NIL;
-    foreach (l, deftrig_dfl_trigstates)
+    foreach(l, deftrig_dfl_trigstates)
    {
        dflstat = (DeferredTriggerStatus) lfirst(l);
        stat = (DeferredTriggerStatus)
            palloc(sizeof(DeferredTriggerStatusData));

        stat->dts_tgoid = dflstat->dts_tgoid;
        stat->dts_tgisdeferred = dflstat->dts_tgisdeferred;

        deftrig_trigstates = lappend(deftrig_trigstates, stat);

@@ -1402,8 +1405,8 @@ DeferredTriggerBeginXact(void)
    MemoryContextSwitchTo(oldcxt);

    deftrig_n_events = 0;
    deftrig_events = NIL;
}

@@ -1484,22 +1487,23 @@ DeferredTriggerAbortXact(void)
void
DeferredTriggerSetState(ConstraintsSetStmt *stmt)
{
    Relation    tgrel;
    Relation    irel = (Relation) NULL;
    List       *l;
    List       *ls;
    List       *lnext;
    List       *loid = NIL;
    MemoryContext oldcxt;
    bool        found;
    DeferredTriggerStatus state;
    bool        hasindex;

    /* ----------
     * Handle SET CONSTRAINTS ALL ...
     * ----------
     */
-    if (stmt->constraints == NIL) {
+    if (stmt->constraints == NIL)
+    {
        if (!IsTransactionBlock())
        {
            /* ----------

@@ -1527,13 +1531,15 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
             * Set the session ALL state to known.
             * ----------
             */
            deftrig_dfl_all_isset = true;
            deftrig_dfl_all_isdeferred = stmt->deferred;

            MemoryContextSwitchTo(oldcxt);

            return;
-        } else {
+        }
+        else
+        {
            /* ----------
             * ... inside of a transaction block
             * ----------

@@ -1559,7 +1565,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
             * Set the per transaction ALL state to known.
             * ----------
             */
            deftrig_all_isset = true;
            deftrig_all_isdeferred = stmt->deferred;

            MemoryContextSwitchTo(oldcxt);

@@ -1578,23 +1584,23 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
        if (hasindex)
            irel = index_openr(TriggerConstrNameIndex);

-        foreach (l, stmt->constraints)
+        foreach(l, stmt->constraints)
        {
            ScanKeyData skey;
            HeapTupleData tuple;
            IndexScanDesc sd = (IndexScanDesc) NULL;
            HeapScanDesc tgscan = (HeapScanDesc) NULL;
            HeapTuple   htup;
            RetrieveIndexResult indexRes;
            Buffer      buffer;
            Form_pg_trigger pg_trigger;
            Oid         constr_oid;

            /* ----------
             * Check that only named constraints are set explicitly
             * ----------
             */
-            if (strcmp((char *)lfirst(l), "") == 0)
+            if (strcmp((char *) lfirst(l), "") == 0)
                elog(ERROR, "unnamed constraints cannot be set explicitly");

            /* ----------

@@ -1605,7 +1611,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
                                   (bits16) 0x0,
                                   (AttrNumber) 1,
                                   (RegProcedure) F_NAMEEQ,
-                                   PointerGetDatum((char *)lfirst(l)));
+                                   PointerGetDatum((char *) lfirst(l)));

            if (hasindex)
                sd = index_beginscan(irel, false, 1, &skey);

@@ -1629,9 +1635,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
                heap_fetch(tgrel, SnapshotNow, &tuple, &buffer);
                pfree(indexRes);
                if (!tuple.t_data)
-                {
                    continue;
-                }
                htup = &tuple;
            }
            else

@@ -1649,13 +1653,13 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
             */
            pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
            if (stmt->deferred && !pg_trigger->tgdeferrable &&
                pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_UPD &&
                pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_DEL)
                elog(ERROR, "Constraint '%s' is not deferrable",
                     (char *) lfirst(l));

            constr_oid = htup->t_data->t_oid;
            loid = lappend(loid, (Node *) constr_oid);
            found = true;

            if (hasindex)

@@ -1667,11 +1671,11 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
             * ----------
             */
            if (!found)
                elog(ERROR, "Constraint '%s' does not exist", (char *) lfirst(l));

            if (hasindex)
                index_endscan(sd);
            else
                heap_endscan(tgscan);
        }
        if (hasindex)

@@ -1688,10 +1692,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
         */
        oldcxt = MemoryContextSwitchTo((MemoryContext) deftrig_gcxt);

-        foreach (l, loid)
+        foreach(l, loid)
        {
            found = false;
-            foreach (ls, deftrig_dfl_trigstates)
+            foreach(ls, deftrig_dfl_trigstates)
            {
                state = (DeferredTriggerStatus) lfirst(ls);
                if (state->dts_tgoid == (Oid) lfirst(l))

@@ -1704,19 +1708,21 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
            if (!found)
            {
                state = (DeferredTriggerStatus)
                    palloc(sizeof(DeferredTriggerStatusData));
                state->dts_tgoid = (Oid) lfirst(l);
                state->dts_tgisdeferred = stmt->deferred;

                deftrig_dfl_trigstates =
                    lappend(deftrig_dfl_trigstates, state);
            }
        }

        MemoryContextSwitchTo(oldcxt);

        return;
-    } else {
+    }
+    else
+    {
        /* ----------
         * Inside of a transaction block set the trigger
         * states of individual triggers on transaction level.

@@ -1724,10 +1730,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
         */
        oldcxt = MemoryContextSwitchTo((MemoryContext) deftrig_cxt);

-        foreach (l, loid)
+        foreach(l, loid)
        {
            found = false;
-            foreach (ls, deftrig_trigstates)
+            foreach(ls, deftrig_trigstates)
            {
                state = (DeferredTriggerStatus) lfirst(ls);
                if (state->dts_tgoid == (Oid) lfirst(l))

@@ -1740,12 +1746,12 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
            if (!found)
            {
                state = (DeferredTriggerStatus)
                    palloc(sizeof(DeferredTriggerStatusData));
                state->dts_tgoid = (Oid) lfirst(l);
                state->dts_tgisdeferred = stmt->deferred;

                deftrig_trigstates =
                    lappend(deftrig_trigstates, state);
            }
        }

@@ -1764,33 +1770,33 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
 */
void
DeferredTriggerSaveEvent(Relation rel, int event,
                         HeapTuple oldtup, HeapTuple newtup)
{
    MemoryContext oldcxt;
    DeferredTriggerEvent new_event;
    DeferredTriggerEvent prev_event;
    int         new_size;
    int         i;
    int         ntriggers;
    Trigger   **triggers;
    ItemPointerData oldctid;
    ItemPointerData newctid;
    TriggerData SaveTriggerData;

    if (deftrig_cxt == NULL)
        elog(ERROR,
             "DeferredTriggerSaveEvent() called outside of transaction");

    /* ----------
     * Check if we're interested in this row at all
     * ----------
     */
    if (rel->trigdesc->n_after_row[TRIGGER_EVENT_INSERT] == 0 &&
        rel->trigdesc->n_after_row[TRIGGER_EVENT_UPDATE] == 0 &&
        rel->trigdesc->n_after_row[TRIGGER_EVENT_DELETE] == 0 &&
        rel->trigdesc->n_before_row[TRIGGER_EVENT_INSERT] == 0 &&
        rel->trigdesc->n_before_row[TRIGGER_EVENT_UPDATE] == 0 &&
        rel->trigdesc->n_before_row[TRIGGER_EVENT_DELETE] == 0)
        return;

    /* ----------

@@ -1813,14 +1819,14 @@ DeferredTriggerSaveEvent(Relation rel, int event,
    oldcxt = MemoryContextSwitchTo((MemoryContext) deftrig_cxt);

    ntriggers = rel->trigdesc->n_after_row[event];
    triggers = rel->trigdesc->tg_after_row[event];

    new_size = sizeof(DeferredTriggerEventData) +
        ntriggers * sizeof(DeferredTriggerEventItem);

    new_event = (DeferredTriggerEvent) palloc(new_size);
    new_event->dte_event = event & TRIGGER_EVENT_OPMASK;
    new_event->dte_relid = rel->rd_id;
    ItemPointerCopy(&oldctid, &(new_event->dte_oldctid));
    ItemPointerCopy(&newctid, &(new_event->dte_newctid));
    new_event->dte_n_items = ntriggers;

@@ -1830,11 +1836,11 @@ DeferredTriggerSaveEvent(Relation rel, int event,
        new_event->dte_item[i].dti_tgoid = triggers[i]->tgoid;
        new_event->dte_item[i].dti_state =
            ((triggers[i]->tgdeferrable) ?
             TRIGGER_DEFERRED_DEFERRABLE : 0) |
            ((triggers[i]->tginitdeferred) ?
             TRIGGER_DEFERRED_INITDEFERRED : 0) |
            ((rel->trigdesc->n_before_row[event] > 0) ?
             TRIGGER_DEFERRED_HAS_BEFORE : 0);
    }
    MemoryContextSwitchTo(oldcxt);

@@ -1864,8 +1870,8 @@ DeferredTriggerSaveEvent(Relation rel, int event,
     */
    for (i = 0; i < ntriggers; i++)
    {
        bool        is_ri_trigger;
        bool        key_unchanged;

        /* ----------
         * We are interested in RI_FKEY triggers only.

@@ -1888,11 +1894,11 @@ DeferredTriggerSaveEvent(Relation rel, int event,
        if (!is_ri_trigger)
            continue;

        SaveTriggerData.tg_event = TRIGGER_EVENT_UPDATE;
        SaveTriggerData.tg_relation = rel;
        SaveTriggerData.tg_trigtuple = oldtup;
        SaveTriggerData.tg_newtuple = newtup;
        SaveTriggerData.tg_trigger = triggers[i];

        CurrentTriggerData = &SaveTriggerData;
        key_unchanged = RI_FKey_keyequal_upd();

@@ -1911,7 +1917,7 @@ DeferredTriggerSaveEvent(Relation rel, int event,
            if (prev_event)
            {
                if (prev_event->dte_event &
                    TRIGGER_DEFERRED_ROW_INSERTED)
                {
                    /* ----------
                     * This is a row inserted during our transaction.

@@ -1919,11 +1925,11 @@ DeferredTriggerSaveEvent(Relation rel, int event,
                     * ----------
                     */
                    new_event->dte_event |=
                        TRIGGER_DEFERRED_ROW_INSERTED;
                    new_event->dte_event |=
                        TRIGGER_DEFERRED_KEY_CHANGED;
                    new_event->dte_item[i].dti_state |=
                        TRIGGER_DEFERRED_KEY_CHANGED;
                }
                else
                {

@@ -1934,12 +1940,12 @@ DeferredTriggerSaveEvent(Relation rel, int event,
                     * ----------
                     */
                    if (prev_event->dte_item[i].dti_state &
                        TRIGGER_DEFERRED_KEY_CHANGED)
                    {
                        new_event->dte_item[i].dti_state |=
                            TRIGGER_DEFERRED_KEY_CHANGED;
                        new_event->dte_event |=
                            TRIGGER_DEFERRED_KEY_CHANGED;
                    }
                }
            }

@@ -1954,16 +1960,16 @@ DeferredTriggerSaveEvent(Relation rel, int event,
            if (prev_event)
            {
                if (prev_event->dte_event &
                    TRIGGER_DEFERRED_ROW_INSERTED)
                    elog(ERROR, "triggered data change violation "
                         "on relation \"%s\"",
                         nameout(&(rel->rd_rel->relname)));

                if (prev_event->dte_item[i].dti_state &
                    TRIGGER_DEFERRED_KEY_CHANGED)
                    elog(ERROR, "triggered data change violation "
                         "on relation \"%s\"",
                         nameout(&(rel->rd_rel->relname)));
            }

            /* ----------

@@ -1972,7 +1978,7 @@ DeferredTriggerSaveEvent(Relation rel, int event,
             * ----------
             */
            new_event->dte_item[i].dti_state |=
                TRIGGER_DEFERRED_KEY_CHANGED;
            new_event->dte_event |= TRIGGER_DEFERRED_KEY_CHANGED;
        }
    }

@@ -1996,8 +2002,8 @@ DeferredTriggerSaveEvent(Relation rel, int event,
            prev_event = deferredTriggerGetPreviousEvent(rel->rd_id, &oldctid);
            if (prev_event->dte_event & TRIGGER_DEFERRED_KEY_CHANGED)
                elog(ERROR, "triggered data change violation "
                     "on relation \"%s\"",
                     nameout(&(rel->rd_rel->relname)));

            break;
        }

@@ -2012,5 +2018,3 @@ DeferredTriggerSaveEvent(Relation rel, int event,
    return;
}

File diff suppressed because it is too large
@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.146 2000/04/06 18:12:07 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.147 2000/04/12 17:14:59 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -102,15 +102,17 @@ static char *vc_show_rusage(struct rusage * ru0);
/*
 * This routines handle a special cross-transaction portal.
 * However it is automatically closed in case of abort.
 */
-void CommonSpecialPortalOpen(void)
+void
+CommonSpecialPortalOpen(void)
{
    char       *pname;

    if (CommonSpecialPortalInUse)
        elog(ERROR, "CommonSpecialPortal is in use");

    /*
     * Create a portal for safe memory across transactions. We need to
     * palloc the name space for it because our hash function expects the

@@ -130,7 +132,8 @@ void CommonSpecialPortalOpen(void)
    CommonSpecialPortalInUse = true;
}

-void CommonSpecialPortalClose(void)
+void
+CommonSpecialPortalClose(void)
{
    /* Clear flag first, to avoid recursion if PortalDrop elog's */
    CommonSpecialPortalInUse = false;

@@ -141,16 +144,18 @@ void CommonSpecialPortalClose(void)
    PortalDrop(&vc_portal);
}

-PortalVariableMemory CommonSpecialPortalGetMemory(void)
+PortalVariableMemory
+CommonSpecialPortalGetMemory(void)
{
    return PortalGetVariableMemory(vc_portal);
}

-bool CommonSpecialPortalIsOpen(void)
+bool
+CommonSpecialPortalIsOpen(void)
{
    return CommonSpecialPortalInUse;
}

void
vacuum(char *vacrel, bool verbose, bool analyze, List *va_spec)
{

@@ -208,9 +213,9 @@ vacuum(char *vacrel, bool verbose, bool analyze, List *va_spec)
     * Start up the vacuum cleaner.
     *
     * NOTE: since this commits the current transaction, the memory holding
     * any passed-in parameters gets freed here.  We must have already
     * copied pass-by-reference parameters to safe storage.  Don't make me
     * fix this again!
     */
    vc_init();

@@ -316,11 +321,12 @@ vc_getrels(NameData *VacRelP)
    if (NameStr(*VacRelP))
    {
        /*
         * we could use the cache here, but it is clearer to use scankeys
         * for both vacuum cases, bjm 2000/01/19
         */
        char       *nontemp_relname;

        /* We must re-map temp table names bjm 2000-04-06 */
        if ((nontemp_relname =

@@ -414,7 +420,7 @@ vc_vacone(Oid relid, bool analyze, List *va_cols)
    int32       nindices,
                i;
    VRelStats  *vacrelstats;
    bool        reindex = false;

    StartTransactionCommand();

@@ -678,7 +684,7 @@ static void
vc_scanheap(VRelStats *vacrelstats, Relation onerel,
            VPageList vacuum_pages, VPageList fraged_pages)
{
    BlockNumber nblocks,
                blkno;
    ItemId      itemid;
    Buffer      buf;

@@ -1201,7 +1207,7 @@ vc_repair_frag(VRelStats *vacrelstats, Relation onerel,
            last_vacuum_block = -1;
        }
        if (num_fraged_pages > 0 &&
            fraged_pages->vpl_pagedesc[num_fraged_pages - 1]->vpd_blkno ==
            (BlockNumber) blkno)
        {
            /* page is in fraged_pages too; remove it */

@@ -1456,8 +1462,8 @@ vc_repair_frag(VRelStats *vacrelstats, Relation onerel,
                     * we stop shrinking here. I could try to find
                     * real parent row but want not to do it because
                     * of real solution will be implemented anyway,
                     * latter, and we are too close to 6.5 release. -
                     * vadim 06/11/99
                     */
                    if (Ptp.t_data->t_xmax != tp.t_data->t_xmin)
                    {

@@ -1539,20 +1545,23 @@ vc_repair_frag(VRelStats *vacrelstats, Relation onerel,
                     * to get t_infomask of inserted heap tuple !!!
                     */
                    ToPage = BufferGetPage(cur_buffer);

                    /*
                     * If this page was not used before - clean it.
                     *
                     * This path is different from the other callers of
                     * vc_vacpage, because we have already incremented the
                     * vpd's vpd_offsets_used field to account for the
                     * tuple(s) we expect to move onto the page.  Therefore
                     * vc_vacpage's check for vpd_offsets_used == 0 is
                     * wrong.  But since that's a good debugging check for
                     * all other callers, we work around it here rather
                     * than remove it.
                     */
                    if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd)
                    {
                        int         sv_offsets_used = destvpd->vpd_offsets_used;

                        destvpd->vpd_offsets_used = 0;
                        vc_vacpage(ToPage, destvpd);
                        destvpd->vpd_offsets_used = sv_offsets_used;

@@ -2267,7 +2276,7 @@ vc_attrstats(Relation onerel, VRelStats *vacrelstats, HeapTuple tuple)
#ifdef _DROP_COLUMN_HACK__
        if (COLUMN_IS_DROPPED(stats->attr))
            continue;
#endif   /* _DROP_COLUMN_HACK__ */
        value = heap_getattr(tuple,
                             stats->attr->attnum, tupDesc, &isnull);

@@ -9,7 +9,7 @@
 *
 *
 * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.33 2000/04/07 13:39:24 thomas Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.34 2000/04/12 17:15:00 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -436,7 +436,7 @@ parse_geqo(char *value)
    if (strcasecmp(tok, "on") == 0)
    {
        int         new_geqo_rels = GEQO_RELS;

        if (val != NULL)
        {

@@ -505,7 +505,7 @@ static bool
show_effective_cache_size()
{
    elog(NOTICE, "EFFECTIVE_CACHE_SIZE is %g (%dK pages)",
-         effective_cache_size, BLCKSZ/1024);
+         effective_cache_size, BLCKSZ / 1024);
    return TRUE;
}

@@ -656,12 +656,12 @@ reset_cpu_operator_cost()
 * DATE_STYLE
 *
 * NOTE: set_default_datestyle() is called during backend startup to check
 * if the PGDATESTYLE environment variable is set.  We want the env var
 * to determine the value that "RESET DateStyle" will reset to!
 */

/* These get initialized from the "master" values in init/globals.c */
static int  DefaultDateStyle;
static bool DefaultEuroDates;

static bool

@@ -777,8 +777,9 @@ set_default_datestyle(void)
{
    char       *DBDate;

    /*
     * Initialize from compile-time defaults in init/globals.c.  NB: this
     * is a necessary step; consider PGDATESTYLE="DEFAULT".
     */
    DefaultDateStyle = DateStyle;
    DefaultEuroDates = EuroDates;

@@ -788,9 +789,11 @@ set_default_datestyle(void)
    if (DBDate == NULL)
        return;

    /*
     * Make a modifiable copy --- overwriting the env var doesn't seem
     * like a good idea, even though we currently won't look at it again.
     * Note that we cannot use palloc at this early stage of
     * initialization.
     */
    DBDate = strdup(DBDate);

@@ -1041,9 +1044,8 @@ reset_XactIsoLevel()
static bool
parse_pg_options(char *value)
{
-    if (!superuser()) {
+    if (!superuser())
        elog(ERROR, "Only users with superuser privilege can set pg_options");
-    }
    if (value == NULL)
        read_pg_options(0);
    else

@@ -1061,9 +1063,8 @@ show_pg_options(void)
static bool
reset_pg_options(void)
{
-    if (!superuser()) {
+    if (!superuser())
        elog(ERROR, "Only users with superuser privilege can set pg_options");
-    }
    read_pg_options(0);
    return (TRUE);
}

@@ -1075,7 +1076,7 @@ reset_pg_options(void)
static bool
parse_random_seed(char *value)
{
    double      seed = 0;

    if (value == NULL)
        reset_random_seed();

@@ -1097,7 +1098,7 @@ show_random_seed(void)
static bool
reset_random_seed(void)
{
    double      seed = 0.5;

    setseed(&seed);
    return (TRUE);

@@ -6,7 +6,7 @@
 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
 * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $Id: execAmi.c,v 1.45 2000/01/26 05:56:21 momjian Exp $
+ * $Id: execAmi.c,v 1.46 2000/04/12 17:15:07 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -235,9 +235,10 @@ ExecCloseR(Plan *node)
        heap_endscan(scanDesc);

    /*
     * endscan released AccessShareLock acquired by beginscan.  If we are
     * holding any stronger locks on the rel, they should be held till end
     * of xact.  Therefore, we need only close the rel and not release
     * locks.
     */
    if (relation != NULL)
        heap_close(relation, NoLock);

@@ -423,7 +424,7 @@ ExecMarkPos(Plan *node)
{
    switch (nodeTag(node))
    {
        case T_SeqScan:
            ExecSeqMarkPos((SeqScan *) node);
            break;

@@ -27,7 +27,7 @@
 *
 *
 * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.112 2000/04/07 07:24:47 vadim Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.113 2000/04/12 17:15:08 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -48,7 +48,7 @@
/* XXX no points for style */
extern TupleTableSlot *EvalPlanQual(EState *estate, Index rti,
             ItemPointer tid);

/* decls for local routines only used within this module */
static TupleDesc InitPlan(CmdType operation,

@@ -75,13 +75,14 @@ static void ExecReplace(TupleTableSlot *slot, ItemPointer tupleid,
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckQueryPerms(CmdType operation, Query *parseTree,
                    Plan *plan);
static void ExecCheckPlanPerms(Plan *plan, CmdType operation,
                   int resultRelation, bool resultIsScanned);
static void ExecCheckRTPerms(List *rangeTable, CmdType operation,
                 int resultRelation, bool resultIsScanned);
static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation,
                  bool isResultRelation, bool resultIsScanned);

/* end of local decls */

@@ -460,14 +461,14 @@ ExecCheckPlanPerms(Plan *plan, CmdType operation,
    foreach(subp, plan->initPlan)
    {
        SubPlan    *subplan = (SubPlan *) lfirst(subp);

        ExecCheckRTPerms(subplan->rtable, CMD_SELECT, 0, false);
        ExecCheckPlanPerms(subplan->plan, CMD_SELECT, 0, false);
    }
    foreach(subp, plan->subPlan)
    {
        SubPlan    *subplan = (SubPlan *) lfirst(subp);

        ExecCheckRTPerms(subplan->rtable, CMD_SELECT, 0, false);
        ExecCheckPlanPerms(subplan->plan, CMD_SELECT, 0, false);

@@ -485,49 +486,51 @@ ExecCheckPlanPerms(Plan *plan, CmdType operation,
    switch (nodeTag(plan))
    {
        case T_Append:
            {
                Append     *app = (Append *) plan;
                List       *appendplans;

                if (app->inheritrelid > 0)
                {
                    /*
                     * Append implements expansion of inheritance; all
                     * members of inheritrtable list will be plugged into
                     * same RTE slot.  Therefore, they are either all
                     * result relations or none.
                     */
                    List       *rtable;

                    foreach(rtable, app->inheritrtable)
                    {
                        ExecCheckRTEPerms((RangeTblEntry *) lfirst(rtable),
                                          operation,
                                          (app->inheritrelid == resultRelation),
                                          resultIsScanned);
                    }
                }
                else
                {
                    /* Append implements UNION, which must be a SELECT */
                    List       *rtables;

                    foreach(rtables, app->unionrtables)
                    {
                        ExecCheckRTPerms((List *) lfirst(rtables),
                                         CMD_SELECT, 0, false);
                    }
                }

                /* Check appended plans */
                foreach(appendplans, app->appendplans)
                {
                    ExecCheckPlanPerms((Plan *) lfirst(appendplans),
                                       operation,
                                       resultRelation,
                                       resultIsScanned);
                }
                break;
            }

        default:
            break;

@@ -539,7 +542,7 @@ ExecCheckPlanPerms(Plan *plan, CmdType operation,
 * Check access permissions for all relations listed in a range table.
 *
 * If resultRelation is not 0, it is the RT index of the relation to be
 * treated as the result relation.  All other relations are assumed to be
 * read-only for the query.
 */
static void

@@ -576,10 +579,11 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation,
    if (rte->skipAcl)
    {
        /*
         * This happens if the access to this table is due to a view query
         * rewriting - the rewrite handler already checked the permissions
         * against the view owner, so we just skip this entry.
         */
        return;
    }

@@ -620,14 +624,12 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation,
            default:
                elog(ERROR, "ExecCheckRTEPerms: bogus operation %d",
                     operation);
                aclcheck_result = ACLCHECK_OK;  /* keep compiler quiet */
                break;
        }
    }
    else
-    {
        aclcheck_result = CHECK(ACL_RD);
-    }

    if (aclcheck_result != ACLCHECK_OK)
        elog(ERROR, "%s: %s",

@ -734,8 +736,9 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
|
|||
/*
|
||||
* If there are indices on the result relation, open them and save
|
||||
* descriptors in the result relation info, so that we can add new
|
||||
* index entries for the tuples we add/update. We need not do this
|
||||
* for a DELETE, however, since deletion doesn't affect indexes.
|
||||
* index entries for the tuples we add/update. We need not do
|
||||
* this for a DELETE, however, since deletion doesn't affect
|
||||
* indexes.
|
||||
*/
|
||||
if (resultRelationDesc->rd_rel->relhasindex &&
|
||||
operation != CMD_DELETE)
|
||||
|
@ -805,10 +808,11 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
|
|||
targetList = plan->targetlist;
|
||||
|
||||
/*
|
||||
* Now that we have the target list, initialize the junk filter if needed.
|
||||
* SELECT and INSERT queries need a filter if there are any junk attrs
|
||||
* in the tlist. UPDATE and DELETE always need one, since there's always
|
||||
* a junk 'ctid' attribute present --- no need to look first.
|
||||
* Now that we have the target list, initialize the junk filter if
|
||||
* needed. SELECT and INSERT queries need a filter if there are any
|
||||
* junk attrs in the tlist. UPDATE and DELETE always need one, since
|
||||
* there's always a junk 'ctid' attribute present --- no need to look
|
||||
* first.
|
||||
*/
|
||||
{
|
||||
bool junk_filter_needed = false;
|
||||
|
@ -948,8 +952,8 @@ EndPlan(Plan *plan, EState *estate)
|
|||
}
|
||||
|
||||
/*
|
||||
* close the result relations if necessary,
|
||||
* but hold locks on them until xact commit
|
||||
* close the result relations if necessary, but hold locks on them
|
||||
* until xact commit
|
||||
*/
|
||||
if (resultRelationInfo != NULL)
|
||||
{
|
||||
|
@@ -1708,10 +1712,10 @@ ExecRelCheck(Relation rel, HeapTuple tuple, EState *estate)

/*
* NOTE: SQL92 specifies that a NULL result from a constraint
* expression is not to be treated as a failure. Therefore,
* tell ExecQual to return TRUE for NULL.
* expression is not to be treated as a failure. Therefore, tell
* ExecQual to return TRUE for NULL.
*/
if (! ExecQual(qual, econtext, true))
if (!ExecQual(qual, econtext, true))
return check[i].ccname;
}
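As a standalone illustration (not part of this commit) of the SQL92 rule the comment above describes -- a CHECK constraint fails only on a known FALSE result, while TRUE and NULL both pass -- a minimal C sketch with hypothetical names:

#include <stdbool.h>

typedef enum { TV_FALSE, TV_TRUE, TV_NULL } TriValue;

/* Only a definite FALSE counts as a constraint violation. */
static bool
check_constraint_passes(TriValue qual_result)
{
    return qual_result != TV_FALSE;
}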
@@ -1738,7 +1742,7 @@ ExecConstraints(char *caller, Relation rel, HeapTuple tuple, EState *estate)
{
if (rel->rd_att->attrs[attrChk - 1]->attnotnull && heap_attisnull(tuple, attrChk))
elog(ERROR, "%s: Fail to add null value in not null attribute %s",
caller, NameStr(rel->rd_att->attrs[attrChk - 1]->attname));
caller, NameStr(rel->rd_att->attrs[attrChk - 1]->attname));
}
}
@ -1791,7 +1795,7 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
|
|||
Assert(oldepq->rti != 0);
|
||||
/* stop execution */
|
||||
ExecEndNode(epq->plan, epq->plan);
|
||||
epqstate->es_tupleTable->next = 0;
|
||||
epqstate->es_tupleTable->next = 0;
|
||||
heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
|
||||
epqstate->es_evTuple[epq->rti - 1] = NULL;
|
||||
/* push current PQ to freePQ stack */
|
||||
|
@ -1861,7 +1865,7 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
|
|||
if (endNode)
|
||||
{
|
||||
ExecEndNode(epq->plan, epq->plan);
|
||||
epqstate->es_tupleTable->next = 0;
|
||||
epqstate->es_tupleTable->next = 0;
|
||||
}
|
||||
|
||||
/* free old RTE' tuple */
|
||||
|
@ -1949,10 +1953,10 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
|
|||
estate->es_evalPlanQual = (Pointer) epq;
|
||||
}
|
||||
else
|
||||
{
|
||||
epq->rti = 0; /* this is the first (oldest) */
|
||||
estate->es_useEvalPlan = false; /* PQ - mark as free and */
|
||||
return (NULL); /* continue Query execution */
|
||||
{
|
||||
epq->rti = 0; /* this is the first (oldest) */
|
||||
estate->es_useEvalPlan = false; /* PQ - mark as free and */
|
||||
return (NULL); /* continue Query execution */
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1961,7 +1965,7 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
|
|||
estate->es_origPlan->nParamExec * sizeof(ParamExecData));
|
||||
memset(epqstate->es_evTupleNull, false,
|
||||
length(estate->es_range_table) * sizeof(bool));
|
||||
Assert(epqstate->es_tupleTable->next == 0);
|
||||
Assert(epqstate->es_tupleTable->next == 0);
|
||||
ExecInitNode(epq->plan, epqstate, NULL);
|
||||
|
||||
/*
|
||||
|
@ -1992,16 +1996,16 @@ lpqnext:;
|
|||
if (TupIsNull(slot))
|
||||
{
|
||||
ExecEndNode(epq->plan, epq->plan);
|
||||
epqstate->es_tupleTable->next = 0;
|
||||
epqstate->es_tupleTable->next = 0;
|
||||
heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
|
||||
epqstate->es_evTuple[epq->rti - 1] = NULL;
|
||||
/* pop old PQ from the stack */
|
||||
oldepq = (evalPlanQual *) epqstate->es_evalPlanQual;
|
||||
if (oldepq == (evalPlanQual *) NULL)
|
||||
{
|
||||
epq->rti = 0; /* this is the first (oldest) */
|
||||
estate->es_useEvalPlan = false; /* PQ - mark as free and */
|
||||
return (NULL); /* continue Query execution */
|
||||
epq->rti = 0; /* this is the first (oldest) */
|
||||
estate->es_useEvalPlan = false; /* PQ - mark as free and */
|
||||
return (NULL); /* continue Query execution */
|
||||
}
|
||||
Assert(oldepq->rti != 0);
|
||||
/* push current PQ to freePQ stack */
|
||||
|
@ -2031,7 +2035,7 @@ EndEvalPlanQual(EState *estate)
|
|||
for (;;)
|
||||
{
|
||||
ExecEndNode(epq->plan, epq->plan);
|
||||
epqstate->es_tupleTable->next = 0;
|
||||
epqstate->es_tupleTable->next = 0;
|
||||
if (epqstate->es_evTuple[epq->rti - 1] != NULL)
|
||||
{
|
||||
heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
|
||||
|
@ -2041,8 +2045,8 @@ EndEvalPlanQual(EState *estate)
|
|||
oldepq = (evalPlanQual *) epqstate->es_evalPlanQual;
|
||||
if (oldepq == (evalPlanQual *) NULL)
|
||||
{
|
||||
epq->rti = 0; /* this is the first (oldest) */
|
||||
estate->es_useEvalPlan = false; /* PQ - mark as free */
|
||||
epq->rti = 0; /* this is the first (oldest) */
|
||||
estate->es_useEvalPlan = false; /* PQ - mark as free */
|
||||
break;
|
||||
}
|
||||
Assert(oldepq->rti != 0);
|
||||
src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.68 2000/02/20 21:32:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.69 2000/04/12 17:15:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -108,12 +108,14 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
|
|||
}
|
||||
else
|
||||
{
|
||||
/* Null refexpr indicates we are doing an INSERT into an array column.
|
||||
* For now, we just take the refassgnexpr (which the parser will have
|
||||
* ensured is an array value) and return it as-is, ignoring any
|
||||
* subscripts that may have been supplied in the INSERT column list.
|
||||
* This is a kluge, but it's not real clear what the semantics ought
|
||||
* to be...
|
||||
|
||||
/*
|
||||
* Null refexpr indicates we are doing an INSERT into an array
|
||||
* column. For now, we just take the refassgnexpr (which the
|
||||
* parser will have ensured is an array value) and return it
|
||||
* as-is, ignoring any subscripts that may have been supplied in
|
||||
* the INSERT column list. This is a kluge, but it's not real
|
||||
* clear what the semantics ought to be...
|
||||
*/
|
||||
array_scanner = NULL;
|
||||
}
|
||||
|
@ -153,16 +155,15 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
|
|||
lIndex = lower.indx;
|
||||
}
|
||||
else
|
||||
{
|
||||
lIndex = NULL;
|
||||
}
|
||||
|
||||
if (arrayRef->refassgnexpr != NULL)
|
||||
{
|
||||
Datum sourceData = ExecEvalExpr(arrayRef->refassgnexpr,
|
||||
econtext,
|
||||
isNull,
|
||||
&dummy);
|
||||
Datum sourceData = ExecEvalExpr(arrayRef->refassgnexpr,
|
||||
econtext,
|
||||
isNull,
|
||||
&dummy);
|
||||
|
||||
if (*isNull)
|
||||
return (Datum) NULL;
|
||||
|
||||
|
@ -209,7 +210,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
|
|||
static Datum
|
||||
ExecEvalAggref(Aggref *aggref, ExprContext *econtext, bool *isNull)
|
||||
{
|
||||
if (econtext->ecxt_aggvalues == NULL) /* safety check */
|
||||
if (econtext->ecxt_aggvalues == NULL) /* safety check */
|
||||
elog(ERROR, "ExecEvalAggref: no aggregates in this expression context");
|
||||
|
||||
*isNull = econtext->ecxt_aggnulls[aggref->aggno];
|
||||
|
@ -281,7 +282,7 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
|
|||
Assert(attnum <= 0 ||
|
||||
(attnum - 1 <= tuple_type->natts - 1 &&
|
||||
tuple_type->attrs[attnum - 1] != NULL &&
|
||||
variable->vartype == tuple_type->attrs[attnum - 1]->atttypid));
|
||||
variable->vartype == tuple_type->attrs[attnum - 1]->atttypid));
|
||||
|
||||
/*
|
||||
* If the attribute number is invalid, then we are supposed to return
|
||||
|
@ -633,7 +634,7 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
|
|||
*/
|
||||
argV[i] = ExecEvalExpr((Node *) lfirst(arg),
|
||||
econtext,
|
||||
& nullVect[i],
|
||||
&nullVect[i],
|
||||
argIsDone);
|
||||
|
||||
if (!(*argIsDone))
|
||||
|
@ -779,9 +780,9 @@ ExecMakeFunctionResult(Node *node,
|
|||
result = postquel_function(funcNode, (char **) argV,
|
||||
isNull, isDone);
|
||||
|
||||
if (! *isDone)
|
||||
if (!*isDone)
|
||||
break; /* got a result from current argument */
|
||||
if (! fcache->hasSetArg)
|
||||
if (!fcache->hasSetArg)
|
||||
break; /* input not a set, so done */
|
||||
|
||||
/* OK, get the next argument... */
|
||||
|
@ -789,7 +790,11 @@ ExecMakeFunctionResult(Node *node,
|
|||
|
||||
if (argDone)
|
||||
{
|
||||
/* End of arguments, so reset the setArg flag and say "Done" */
|
||||
|
||||
/*
|
||||
* End of arguments, so reset the setArg flag and say
|
||||
* "Done"
|
||||
*/
|
||||
fcache->setArg = (char *) NULL;
|
||||
fcache->hasSetArg = false;
|
||||
*isDone = true;
|
||||
|
@ -797,7 +802,8 @@ ExecMakeFunctionResult(Node *node,
|
|||
break;
|
||||
}
|
||||
|
||||
/* If we reach here, loop around to run the function on the
|
||||
/*
|
||||
* If we reach here, loop around to run the function on the
|
||||
* new argument.
|
||||
*/
|
||||
}
|
||||
|
@@ -1003,20 +1009,22 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
AnyNull = false;

/*
* If any of the clauses is TRUE, the OR result is TRUE regardless
* of the states of the rest of the clauses, so we can stop evaluating
* If any of the clauses is TRUE, the OR result is TRUE regardless of
* the states of the rest of the clauses, so we can stop evaluating
* and return TRUE immediately. If none are TRUE and one or more is
* NULL, we return NULL; otherwise we return FALSE. This makes sense
* when you interpret NULL as "don't know": if we have a TRUE then the
* OR is TRUE even if we aren't sure about some of the other inputs.
* If all the known inputs are FALSE, but we have one or more "don't
* knows", then we have to report that we "don't know" what the OR's
* result should be --- perhaps one of the "don't knows" would have been
* TRUE if we'd known its value. Only when all the inputs are known
* to be FALSE can we state confidently that the OR's result is FALSE.
* result should be --- perhaps one of the "don't knows" would have
* been TRUE if we'd known its value. Only when all the inputs are
* known to be FALSE can we state confidently that the OR's result is
* FALSE.
*/
foreach(clause, clauses)
{

/*
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
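As a standalone illustration (not part of this commit) of the three-valued OR rule the comment above describes, a minimal C sketch with hypothetical names -- one known TRUE decides the result; otherwise the OR is NULL if any input was NULL, else FALSE:

#include <stdbool.h>

static bool
three_valued_or(const bool *values, const bool *nulls, int n, bool *isNull)
{
    bool any_null = false;
    int  i;

    for (i = 0; i < n; i++)
    {
        if (nulls[i])
        {
            any_null = true;   /* "don't know" -- remember it and keep scanning */
            continue;
        }
        if (values[i])
        {
            *isNull = false;   /* one known TRUE decides the OR */
            return true;
        }
    }
    *isNull = any_null;        /* no TRUE found: NULL if any input was NULL */
    return false;
}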
@ -1025,6 +1033,7 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
|
|||
econtext,
|
||||
isNull,
|
||||
&isDone);
|
||||
|
||||
/*
|
||||
* if we have a non-null true result, then return it.
|
||||
*/
|
||||
|
@ -1059,12 +1068,13 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
|
|||
* If any of the clauses is FALSE, the AND result is FALSE regardless
|
||||
* of the states of the rest of the clauses, so we can stop evaluating
|
||||
* and return FALSE immediately. If none are FALSE and one or more is
|
||||
* NULL, we return NULL; otherwise we return TRUE. This makes sense
|
||||
* NULL, we return NULL; otherwise we return TRUE. This makes sense
|
||||
* when you interpret NULL as "don't know", using the same sort of
|
||||
* reasoning as for OR, above.
|
||||
*/
|
||||
foreach(clause, clauses)
|
||||
{
|
||||
|
||||
/*
|
||||
* We don't iterate over sets in the quals, so pass in an isDone
|
||||
* flag, but ignore it.
|
||||
|
@ -1073,6 +1083,7 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
|
|||
econtext,
|
||||
isNull,
|
||||
&isDone);
|
||||
|
||||
/*
|
||||
* if we have a non-null false result, then return it.
|
||||
*/
|
||||
|
@ -1084,7 +1095,7 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
|
|||
|
||||
/* AnyNull is true if at least one clause evaluated to NULL */
|
||||
*isNull = AnyNull;
|
||||
return (Datum) (! AnyNull);
|
||||
return (Datum) (!AnyNull);
|
||||
}
|
||||
|
||||
/* ----------------------------------------------------------------
|
||||
|
@ -1129,7 +1140,7 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
|
|||
* case statement is satisfied. A NULL result from the test is
|
||||
* not considered true.
|
||||
*/
|
||||
if (DatumGetInt32(clause_value) != 0 && ! *isNull)
|
||||
if (DatumGetInt32(clause_value) != 0 && !*isNull)
|
||||
{
|
||||
return ExecEvalExpr(wclause->result,
|
||||
econtext,
|
||||
|
@ -1258,7 +1269,7 @@ ExecEvalExpr(Node *expression,
|
|||
default:
|
||||
elog(ERROR, "ExecEvalExpr: unknown expression type %d",
|
||||
expr->opType);
|
||||
retDatum = 0; /* keep compiler quiet */
|
||||
retDatum = 0; /* keep compiler quiet */
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
@ -1332,7 +1343,7 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
|
|||
IncrProcessed();
|
||||
|
||||
/*
|
||||
* Evaluate the qual conditions one at a time. If we find a FALSE
|
||||
* Evaluate the qual conditions one at a time. If we find a FALSE
|
||||
* result, we can stop evaluating and return FALSE --- the AND result
|
||||
* must be FALSE. Also, if we find a NULL result when resultForNull
|
||||
* is FALSE, we can stop and return FALSE --- the AND result must be
|
||||
|
@ -1353,14 +1364,15 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
|
|||
|
||||
/*
|
||||
* If there is a null clause, consider the qualification to fail.
|
||||
* XXX is this still correct for constraints? It probably shouldn't
|
||||
* happen at all ...
|
||||
* XXX is this still correct for constraints? It probably
|
||||
* shouldn't happen at all ...
|
||||
*/
|
||||
if (clause == NULL)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* pass isDone, but ignore it. We don't iterate over multiple returns
|
||||
* in the qualifications.
|
||||
* pass isDone, but ignore it. We don't iterate over multiple
|
||||
* returns in the qualifications.
|
||||
*/
|
||||
expr_value = ExecEvalExpr(clause, econtext, &isNull, &isDone);
|
||||
|
||||
|
@ -1429,7 +1441,8 @@ ExecTargetList(List *targetlist,
|
|||
HeapTuple newTuple;
|
||||
bool isNull;
|
||||
bool haveDoneIters;
|
||||
static struct tupleDesc NullTupleDesc; /* we assume this inits to zeroes */
|
||||
static struct tupleDesc NullTupleDesc; /* we assume this inits to
|
||||
* zeroes */
|
||||
|
||||
/*
|
||||
* debugging stuff
|
||||
|
@ -1512,7 +1525,8 @@ ExecTargetList(List *targetlist,
|
|||
if (itemIsDone[resind])
|
||||
haveDoneIters = true;
|
||||
else
|
||||
*isDone = false; /* we have undone Iters in the list */
|
||||
*isDone = false; /* we have undone Iters in the
|
||||
* list */
|
||||
}
|
||||
}
|
||||
else
|
||||
|
@ -1571,7 +1585,9 @@ ExecTargetList(List *targetlist,
|
|||
{
|
||||
if (*isDone)
|
||||
{
|
||||
/* all Iters are done, so return a null indicating tlist set
|
||||
|
||||
/*
|
||||
* all Iters are done, so return a null indicating tlist set
|
||||
* expansion is complete.
|
||||
*/
|
||||
newTuple = NULL;
|
||||
|
@ -1579,21 +1595,24 @@ ExecTargetList(List *targetlist,
|
|||
}
|
||||
else
|
||||
{
|
||||
/* We have some done and some undone Iters. Restart the done ones
|
||||
* so that we can deliver a tuple (if possible).
|
||||
|
||||
/*
|
||||
* We have some done and some undone Iters. Restart the done
|
||||
* ones so that we can deliver a tuple (if possible).
|
||||
*
|
||||
* XXX this code is a crock, because it only works for Iters at
|
||||
* the top level of tlist expressions, and doesn't even work right
|
||||
* for them: you should get all possible combinations of Iter
|
||||
* results, but you won't unless the numbers of values returned by
|
||||
* each are relatively prime. Should have a mechanism more like
|
||||
* aggregate functions, where we make a list of all Iters
|
||||
* contained in the tlist and cycle through their values in a
|
||||
* methodical fashion. To do someday; can't get excited about
|
||||
* fixing a Berkeley feature that's not in SQL92. (The only
|
||||
* reason we're doing this much is that we have to be sure all
|
||||
* the Iters are run to completion, or their subplan executors
|
||||
* will have unreleased resources, e.g. pinned buffers...)
|
||||
* the top level of tlist expressions, and doesn't even work
|
||||
* right for them: you should get all possible combinations of
|
||||
* Iter results, but you won't unless the numbers of values
|
||||
* returned by each are relatively prime. Should have a
|
||||
* mechanism more like aggregate functions, where we make a
|
||||
* list of all Iters contained in the tlist and cycle through
|
||||
* their values in a methodical fashion. To do someday; can't
|
||||
* get excited about fixing a Berkeley feature that's not in
|
||||
* SQL92. (The only reason we're doing this much is that we
|
||||
* have to be sure all the Iters are run to completion, or
|
||||
* their subplan executors will have unreleased resources,
|
||||
* e.g. pinned buffers...)
|
||||
*/
|
||||
foreach(tl, targetlist)
|
||||
{
|
||||
|
@ -1605,16 +1624,18 @@ ExecTargetList(List *targetlist,
|
|||
resdom = tle->resdom;
|
||||
resind = resdom->resno - 1;
|
||||
|
||||
if (IsA(expr, Iter) && itemIsDone[resind])
|
||||
if (IsA(expr, Iter) &&itemIsDone[resind])
|
||||
{
|
||||
constvalue = (Datum) ExecEvalExpr(expr,
|
||||
econtext,
|
||||
&isNull,
|
||||
&itemIsDone[resind]);
|
||||
&itemIsDone[resind]);
|
||||
if (itemIsDone[resind])
|
||||
{
|
||||
/* Oh dear, this Iter is returning an empty set.
|
||||
* Guess we can't make a tuple after all.
|
||||
|
||||
/*
|
||||
* Oh dear, this Iter is returning an empty
|
||||
* set. Guess we can't make a tuple after all.
|
||||
*/
|
||||
*isDone = true;
|
||||
newTuple = NULL;
|
||||
|
@ -1639,6 +1660,7 @@ ExecTargetList(List *targetlist,
|
|||
newTuple = (HeapTuple) heap_formtuple(targettype, values, null_head);
|
||||
|
||||
exit:
|
||||
|
||||
/*
|
||||
* free the status arrays if we palloc'd them
|
||||
*/
|
||||
src/backend/executor/execTuples.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.36 2000/01/27 18:11:27 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.37 2000/04/12 17:15:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
|
@ -188,8 +188,8 @@ ExecCreateTupleTable(int initialSize) /* initial number of slots in
|
|||
* --------------------------------
|
||||
*/
|
||||
void
|
||||
ExecDropTupleTable(TupleTable table, /* tuple table */
|
||||
bool shouldFree) /* true if we should free slot
|
||||
ExecDropTupleTable(TupleTable table, /* tuple table */
|
||||
bool shouldFree) /* true if we should free slot
|
||||
* contents */
|
||||
{
|
||||
int next; /* next available slot */
|
||||
|
@ -262,7 +262,7 @@ TupleTableSlot * /* return: the slot allocated in the tuple
|
|||
ExecAllocTableSlot(TupleTable table)
|
||||
{
|
||||
int slotnum; /* new slot number */
|
||||
TupleTableSlot* slot;
|
||||
TupleTableSlot *slot;
|
||||
|
||||
/* ----------------
|
||||
* sanity checks
|
||||
|
@ -335,8 +335,8 @@ ExecAllocTableSlot(TupleTable table)
|
|||
*
|
||||
* tuple: tuple to store
|
||||
* slot: slot to store it in
|
||||
* buffer: disk buffer if tuple is in a disk page, else InvalidBuffer
|
||||
* shouldFree: true if ExecClearTuple should pfree() the tuple
|
||||
* buffer: disk buffer if tuple is in a disk page, else InvalidBuffer
|
||||
* shouldFree: true if ExecClearTuple should pfree() the tuple
|
||||
* when done with it
|
||||
*
|
||||
* If 'buffer' is not InvalidBuffer, the tuple table code acquires a pin
|
||||
|
@ -350,7 +350,7 @@ ExecAllocTableSlot(TupleTable table)
|
|||
* Another case where it is 'false' is when the referenced tuple is held
|
||||
* in a tuple table slot belonging to a lower-level executor Proc node.
|
||||
* In this case the lower-level slot retains ownership and responsibility
|
||||
* for eventually releasing the tuple. When this method is used, we must
|
||||
* for eventually releasing the tuple. When this method is used, we must
|
||||
* be certain that the upper-level Proc node will lose interest in the tuple
|
||||
* sooner than the lower-level one does! If you're not certain, copy the
|
||||
* lower-level tuple with heap_copytuple and let the upper-level table
|
||||
|
@ -385,7 +385,8 @@ ExecStoreTuple(HeapTuple tuple,
|
|||
slot->ttc_buffer = buffer;
|
||||
slot->ttc_shouldFree = shouldFree;
|
||||
|
||||
/* If tuple is on a disk page, keep the page pinned as long as we hold
|
||||
/*
|
||||
* If tuple is on a disk page, keep the page pinned as long as we hold
|
||||
* a pointer into it.
|
||||
*/
|
||||
if (BufferIsValid(buffer))
|
||||
|
@ -426,7 +427,7 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */
|
|||
|
||||
slot->val = (HeapTuple) NULL;
|
||||
|
||||
slot->ttc_shouldFree = true; /* probably useless code... */
|
||||
slot->ttc_shouldFree = true;/* probably useless code... */
|
||||
|
||||
/* ----------------
|
||||
* Drop the pin on the referenced buffer, if there is one.
|
||||
|
@ -776,6 +777,7 @@ NodeGetResultTupleSlot(Plan *node)
|
|||
case T_TidScan:
|
||||
{
|
||||
CommonScanState *scanstate = ((IndexScan *) node)->scan.scanstate;
|
||||
|
||||
slot = scanstate->cstate.cs_ResultTupleSlot;
|
||||
}
|
||||
break;
|
||||
src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.54 2000/02/18 09:29:57 inoue Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.55 2000/04/12 17:15:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
|
@ -776,7 +776,7 @@ ExecOpenIndices(Oid resultRelationOid,
|
|||
if (!RelationGetForm(resultRelationInfo->ri_RelationDesc)->relhasindex)
|
||||
return;
|
||||
if (IsIgnoringSystemIndexes() &&
|
||||
IsSystemRelationName(RelationGetRelationName(resultRelationInfo->ri_RelationDesc)))
|
||||
IsSystemRelationName(RelationGetRelationName(resultRelationInfo->ri_RelationDesc)))
|
||||
return;
|
||||
/* ----------------
|
||||
* open pg_index
|
||||
|
@ -923,8 +923,8 @@ ExecOpenIndices(Oid resultRelationOid,
|
|||
|
||||
/*
|
||||
* Hack for not btree and hash indices: they use relation
|
||||
* level exclusive locking on update (i.e. - they are
|
||||
* not ready for MVCC) and so we have to exclusively lock
|
||||
* level exclusive locking on update (i.e. - they are not
|
||||
* ready for MVCC) and so we have to exclusively lock
|
||||
* indices here to prevent deadlocks if we will scan them
|
||||
* - index_beginscan places AccessShareLock, indices
|
||||
* update methods don't use locks at all. We release this
|
||||
|
@ -1186,7 +1186,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
|
|||
econtext->ecxt_scantuple = slot;
|
||||
|
||||
/* Skip this index-update if the predicate isn't satisfied */
|
||||
if (! ExecQual((List *) predicate, econtext, false))
|
||||
if (!ExecQual((List *) predicate, econtext, false))
|
||||
continue;
|
||||
}
|
||||
|
||||
|
src/backend/executor/functions.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.32 2000/04/04 21:44:39 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.33 2000/04/12 17:15:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -150,6 +150,7 @@ init_execution_state(FunctionCachePtr fcache,
|
|||
static TupleDesc
|
||||
postquel_start(execution_state *es)
|
||||
{
|
||||
|
||||
/*
|
||||
* Do nothing for utility commands. (create, destroy...) DZ -
|
||||
* 30-8-1996
|
||||
|
@ -166,9 +167,9 @@ postquel_getnext(execution_state *es)
|
|||
|
||||
if (es->qd->operation == CMD_UTILITY)
|
||||
{
|
||||
|
||||
/*
|
||||
* Process a utility command. (create, destroy...) DZ -
|
||||
* 30-8-1996
|
||||
* Process a utility command. (create, destroy...) DZ - 30-8-1996
|
||||
*/
|
||||
ProcessUtility(es->qd->parsetree->utilityStmt, es->qd->dest);
|
||||
if (!LAST_POSTQUEL_COMMAND(es))
|
||||
|
@ -184,6 +185,7 @@ postquel_getnext(execution_state *es)
|
|||
static void
|
||||
postquel_end(execution_state *es)
|
||||
{
|
||||
|
||||
/*
|
||||
* Do nothing for utility commands. (create, destroy...) DZ -
|
||||
* 30-8-1996
|
||||
src/backend/executor/nodeAgg.c
@@ -15,7 +15,7 @@
* value1 = finalfunc(value1, value2)
*
* If initcond1 is NULL then the first non-NULL input_value is
* assigned directly to value1. sfunc1 isn't applied until value1
* assigned directly to value1. sfunc1 isn't applied until value1
* is non-NULL.
*
* sfunc1 is never applied when the current tuple's input_value is NULL.
@@ -24,7 +24,7 @@
* (usenulls was formerly used for COUNT(*), but is no longer needed for
* that purpose; as of 10/1999 the support for usenulls is dead code.
* I have not removed it because it seems like a potentially useful
* feature for user-defined aggregates. We'd just need to add a
* feature for user-defined aggregates. We'd just need to add a
* flag column to pg_aggregate and a parameter to CREATE AGGREGATE...)
*
*
@@ -32,7 +32,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.62 2000/01/26 05:56:22 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.63 2000/04/12 17:15:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
|
@ -56,6 +56,7 @@
|
|||
*/
|
||||
typedef struct AggStatePerAggData
|
||||
{
|
||||
|
||||
/*
|
||||
* These values are set up during ExecInitAgg() and do not change
|
||||
* thereafter:
|
||||
|
@ -68,6 +69,7 @@ typedef struct AggStatePerAggData
|
|||
Oid xfn1_oid;
|
||||
Oid xfn2_oid;
|
||||
Oid finalfn_oid;
|
||||
|
||||
/*
|
||||
* fmgr lookup data for transfer functions --- only valid when
|
||||
* corresponding oid is not InvalidOid
|
||||
|
@ -75,18 +77,21 @@ typedef struct AggStatePerAggData
|
|||
FmgrInfo xfn1;
|
||||
FmgrInfo xfn2;
|
||||
FmgrInfo finalfn;
|
||||
|
||||
/*
|
||||
* Type of input data and Oid of sort operator to use for it;
|
||||
* only set/used when aggregate has DISTINCT flag. (These are not
|
||||
* used directly by nodeAgg, but must be passed to the Tuplesort object.)
|
||||
* Type of input data and Oid of sort operator to use for it; only
|
||||
* set/used when aggregate has DISTINCT flag. (These are not used
|
||||
* directly by nodeAgg, but must be passed to the Tuplesort object.)
|
||||
*/
|
||||
Oid inputType;
|
||||
Oid sortOperator;
|
||||
|
||||
/*
|
||||
* fmgr lookup data for input type's equality operator --- only set/used
|
||||
* when aggregate has DISTINCT flag.
|
||||
* fmgr lookup data for input type's equality operator --- only
|
||||
* set/used when aggregate has DISTINCT flag.
|
||||
*/
|
||||
FmgrInfo equalfn;
|
||||
|
||||
/*
|
||||
* initial values from pg_aggregate entry
|
||||
*/
|
||||
|
@ -94,6 +99,7 @@ typedef struct AggStatePerAggData
|
|||
Datum initValue2; /* for transtype2 */
|
||||
bool initValue1IsNull,
|
||||
initValue2IsNull;
|
||||
|
||||
/*
|
||||
* We need the len and byval info for the agg's input and transition
|
||||
* data types in order to know how to copy/delete values.
|
||||
|
@ -106,14 +112,14 @@ typedef struct AggStatePerAggData
|
|||
transtype2ByVal;
|
||||
|
||||
/*
|
||||
* These values are working state that is initialized at the start
|
||||
* of an input tuple group and updated for each input tuple.
|
||||
* These values are working state that is initialized at the start of
|
||||
* an input tuple group and updated for each input tuple.
|
||||
*
|
||||
* For a simple (non DISTINCT) aggregate, we just feed the input values
|
||||
* straight to the transition functions. If it's DISTINCT, we pass the
|
||||
* input values into a Tuplesort object; then at completion of the input
|
||||
* tuple group, we scan the sorted values, eliminate duplicates, and run
|
||||
* the transition functions on the rest.
|
||||
* straight to the transition functions. If it's DISTINCT, we pass
|
||||
* the input values into a Tuplesort object; then at completion of the
|
||||
* input tuple group, we scan the sorted values, eliminate duplicates,
|
||||
* and run the transition functions on the rest.
|
||||
*/
|
||||
|
||||
Tuplesortstate *sortstate; /* sort object, if a DISTINCT agg */
|
||||
|
@ -123,20 +129,22 @@ typedef struct AggStatePerAggData
|
|||
bool value1IsNull,
|
||||
value2IsNull;
|
||||
bool noInitValue; /* true if value1 not set yet */
|
||||
|
||||
/*
|
||||
* Note: right now, noInitValue always has the same value as value1IsNull.
|
||||
* But we should keep them separate because once the fmgr interface is
|
||||
* fixed, we'll need to distinguish a null returned by transfn1 from
|
||||
* a null we haven't yet replaced with an input value.
|
||||
* Note: right now, noInitValue always has the same value as
|
||||
* value1IsNull. But we should keep them separate because once the
|
||||
* fmgr interface is fixed, we'll need to distinguish a null returned
|
||||
* by transfn1 from a null we haven't yet replaced with an input
|
||||
* value.
|
||||
*/
|
||||
} AggStatePerAggData;
|
||||
|
||||
|
||||
static void initialize_aggregate (AggStatePerAgg peraggstate);
|
||||
static void advance_transition_functions (AggStatePerAgg peraggstate,
|
||||
Datum newVal, bool isNull);
|
||||
static void finalize_aggregate (AggStatePerAgg peraggstate,
|
||||
Datum *resultVal, bool *resultIsNull);
|
||||
static void initialize_aggregate(AggStatePerAgg peraggstate);
|
||||
static void advance_transition_functions(AggStatePerAgg peraggstate,
|
||||
Datum newVal, bool isNull);
|
||||
static void finalize_aggregate(AggStatePerAgg peraggstate,
|
||||
Datum *resultVal, bool *resultIsNull);
|
||||
static Datum copyDatum(Datum val, int typLen, bool typByVal);
|
||||
|
||||
|
||||
|
@ -144,17 +152,19 @@ static Datum copyDatum(Datum val, int typLen, bool typByVal);
|
|||
* Initialize one aggregate for a new set of input values.
|
||||
*/
|
||||
static void
|
||||
initialize_aggregate (AggStatePerAgg peraggstate)
|
||||
initialize_aggregate(AggStatePerAgg peraggstate)
|
||||
{
|
||||
Aggref *aggref = peraggstate->aggref;
|
||||
Aggref *aggref = peraggstate->aggref;
|
||||
|
||||
/*
|
||||
* Start a fresh sort operation for each DISTINCT aggregate.
|
||||
*/
|
||||
if (aggref->aggdistinct)
|
||||
{
|
||||
/* In case of rescan, maybe there could be an uncompleted
|
||||
* sort operation? Clean it up if so.
|
||||
|
||||
/*
|
||||
* In case of rescan, maybe there could be an uncompleted sort
|
||||
* operation? Clean it up if so.
|
||||
*/
|
||||
if (peraggstate->sortstate)
|
||||
tuplesort_end(peraggstate->sortstate);
|
||||
|
@ -169,8 +179,8 @@ initialize_aggregate (AggStatePerAgg peraggstate)
|
|||
* (Re)set value1 and value2 to their initial values.
|
||||
*/
|
||||
if (OidIsValid(peraggstate->xfn1_oid) &&
|
||||
! peraggstate->initValue1IsNull)
|
||||
peraggstate->value1 = copyDatum(peraggstate->initValue1,
|
||||
!peraggstate->initValue1IsNull)
|
||||
peraggstate->value1 = copyDatum(peraggstate->initValue1,
|
||||
peraggstate->transtype1Len,
|
||||
peraggstate->transtype1ByVal);
|
||||
else
|
||||
|
@ -178,8 +188,8 @@ initialize_aggregate (AggStatePerAgg peraggstate)
|
|||
peraggstate->value1IsNull = peraggstate->initValue1IsNull;
|
||||
|
||||
if (OidIsValid(peraggstate->xfn2_oid) &&
|
||||
! peraggstate->initValue2IsNull)
|
||||
peraggstate->value2 = copyDatum(peraggstate->initValue2,
|
||||
!peraggstate->initValue2IsNull)
|
||||
peraggstate->value2 = copyDatum(peraggstate->initValue2,
|
||||
peraggstate->transtype2Len,
|
||||
peraggstate->transtype2ByVal);
|
||||
else
|
||||
|
@ -205,8 +215,8 @@ initialize_aggregate (AggStatePerAgg peraggstate)
|
|||
* out before reaching here.
|
||||
*/
|
||||
static void
|
||||
advance_transition_functions (AggStatePerAgg peraggstate,
|
||||
Datum newVal, bool isNull)
|
||||
advance_transition_functions(AggStatePerAgg peraggstate,
|
||||
Datum newVal, bool isNull)
|
||||
{
|
||||
Datum args[2];
|
||||
|
||||
|
@ -214,6 +224,7 @@ advance_transition_functions (AggStatePerAgg peraggstate,
|
|||
{
|
||||
if (peraggstate->noInitValue)
|
||||
{
|
||||
|
||||
/*
|
||||
* value1 has not been initialized. This is the first non-NULL
|
||||
* input value. We use it as the initial value for value1.
|
||||
|
@ -238,7 +249,7 @@ advance_transition_functions (AggStatePerAgg peraggstate,
|
|||
newVal = (Datum) fmgr_c(&peraggstate->xfn1,
|
||||
(FmgrValues *) args,
|
||||
&isNull);
|
||||
if (! peraggstate->transtype1ByVal)
|
||||
if (!peraggstate->transtype1ByVal)
|
||||
pfree(peraggstate->value1);
|
||||
peraggstate->value1 = newVal;
|
||||
}
|
||||
|
@ -252,7 +263,7 @@ advance_transition_functions (AggStatePerAgg peraggstate,
|
|||
newVal = (Datum) fmgr_c(&peraggstate->xfn2,
|
||||
(FmgrValues *) args,
|
||||
&isNull);
|
||||
if (! peraggstate->transtype2ByVal)
|
||||
if (!peraggstate->transtype2ByVal)
|
||||
pfree(peraggstate->value2);
|
||||
peraggstate->value2 = newVal;
|
||||
}
|
||||
|
@ -262,17 +273,18 @@ advance_transition_functions (AggStatePerAgg peraggstate,
|
|||
* Compute the final value of one aggregate.
|
||||
*/
|
||||
static void
|
||||
finalize_aggregate (AggStatePerAgg peraggstate,
|
||||
Datum *resultVal, bool *resultIsNull)
|
||||
finalize_aggregate(AggStatePerAgg peraggstate,
|
||||
Datum *resultVal, bool *resultIsNull)
|
||||
{
|
||||
Aggref *aggref = peraggstate->aggref;
|
||||
char *args[2];
|
||||
|
||||
/*
|
||||
* If it's a DISTINCT aggregate, all we've done so far is to stuff the
|
||||
* input values into the sort object. Complete the sort, then run
|
||||
* the transition functions on the non-duplicate values. Note that
|
||||
* DISTINCT always suppresses nulls, per SQL spec, regardless of usenulls.
|
||||
* input values into the sort object. Complete the sort, then run the
|
||||
* transition functions on the non-duplicate values. Note that
|
||||
* DISTINCT always suppresses nulls, per SQL spec, regardless of
|
||||
* usenulls.
|
||||
*/
|
||||
if (aggref->aggdistinct)
|
||||
{
|
||||
|
@ -289,41 +301,41 @@ finalize_aggregate (AggStatePerAgg peraggstate,
|
|||
continue;
|
||||
if (haveOldVal)
|
||||
{
|
||||
Datum equal;
|
||||
Datum equal;
|
||||
|
||||
equal = (Datum) (*fmgr_faddr(&peraggstate->equalfn)) (oldVal,
|
||||
newVal);
|
||||
newVal);
|
||||
if (DatumGetInt32(equal) != 0)
|
||||
{
|
||||
if (! peraggstate->inputtypeByVal)
|
||||
if (!peraggstate->inputtypeByVal)
|
||||
pfree(DatumGetPointer(newVal));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
advance_transition_functions(peraggstate, newVal, false);
|
||||
if (haveOldVal && ! peraggstate->inputtypeByVal)
|
||||
if (haveOldVal && !peraggstate->inputtypeByVal)
|
||||
pfree(DatumGetPointer(oldVal));
|
||||
oldVal = newVal;
|
||||
haveOldVal = true;
|
||||
}
|
||||
if (haveOldVal && ! peraggstate->inputtypeByVal)
|
||||
if (haveOldVal && !peraggstate->inputtypeByVal)
|
||||
pfree(DatumGetPointer(oldVal));
|
||||
tuplesort_end(peraggstate->sortstate);
|
||||
peraggstate->sortstate = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Now apply the agg's finalfn, or substitute the appropriate transition
|
||||
* value if there is no finalfn.
|
||||
* Now apply the agg's finalfn, or substitute the appropriate
|
||||
* transition value if there is no finalfn.
|
||||
*
|
||||
* XXX For now, only apply finalfn if we got at least one
|
||||
* non-null input value. This prevents zero divide in AVG().
|
||||
* If we had cleaner handling of null inputs/results in functions,
|
||||
* we could probably take out this hack and define the result
|
||||
* for no inputs as whatever finalfn returns for null input.
|
||||
* XXX For now, only apply finalfn if we got at least one non-null input
|
||||
* value. This prevents zero divide in AVG(). If we had cleaner
|
||||
* handling of null inputs/results in functions, we could probably
|
||||
* take out this hack and define the result for no inputs as whatever
|
||||
* finalfn returns for null input.
|
||||
*/
|
||||
if (OidIsValid(peraggstate->finalfn_oid) &&
|
||||
! peraggstate->noInitValue)
|
||||
!peraggstate->noInitValue)
|
||||
{
|
||||
if (peraggstate->finalfn.fn_nargs > 1)
|
||||
{
|
||||
|
@ -361,17 +373,17 @@ finalize_aggregate (AggStatePerAgg peraggstate,
|
|||
elog(ERROR, "ExecAgg: no valid transition functions??");
|
||||
|
||||
/*
|
||||
* Release any per-group working storage, unless we're passing
|
||||
* it back as the result of the aggregate.
|
||||
* Release any per-group working storage, unless we're passing it back
|
||||
* as the result of the aggregate.
|
||||
*/
|
||||
if (OidIsValid(peraggstate->xfn1_oid) &&
|
||||
! peraggstate->value1IsNull &&
|
||||
! peraggstate->transtype1ByVal)
|
||||
!peraggstate->value1IsNull &&
|
||||
!peraggstate->transtype1ByVal)
|
||||
pfree(peraggstate->value1);
|
||||
|
||||
|
||||
if (OidIsValid(peraggstate->xfn2_oid) &&
|
||||
! peraggstate->value2IsNull &&
|
||||
! peraggstate->transtype2ByVal)
|
||||
!peraggstate->value2IsNull &&
|
||||
!peraggstate->transtype2ByVal)
|
||||
pfree(peraggstate->value2);
|
||||
}
|
||||
|
||||
|
@ -383,8 +395,8 @@ finalize_aggregate (AggStatePerAgg peraggstate,
|
|||
* the appropriate attribute for each aggregate function use (Aggref
|
||||
* node) appearing in the targetlist or qual of the node. The number
|
||||
* of tuples to aggregate over depends on whether a GROUP BY clause is
|
||||
* present. We can produce an aggregate result row per group, or just
|
||||
* one for the whole query. The value of each aggregate is stored in
|
||||
* present. We can produce an aggregate result row per group, or just
|
||||
* one for the whole query. The value of each aggregate is stored in
|
||||
* the expression context to be used when ExecProject evaluates the
|
||||
* result tuple.
|
||||
*
|
||||
|
@ -403,7 +415,7 @@ ExecAgg(Agg *node)
|
|||
ProjectionInfo *projInfo;
|
||||
Datum *aggvalues;
|
||||
bool *aggnulls;
|
||||
AggStatePerAgg peragg;
|
||||
AggStatePerAgg peragg;
|
||||
TupleTableSlot *resultSlot;
|
||||
HeapTuple inputTuple;
|
||||
int aggno;
|
||||
|
@ -437,7 +449,7 @@ ExecAgg(Agg *node)
|
|||
*/
|
||||
for (aggno = 0; aggno < aggstate->numaggs; aggno++)
|
||||
{
|
||||
AggStatePerAgg peraggstate = &peragg[aggno];
|
||||
AggStatePerAgg peraggstate = &peragg[aggno];
|
||||
|
||||
initialize_aggregate(peraggstate);
|
||||
}
|
||||
|
@ -459,9 +471,9 @@ ExecAgg(Agg *node)
|
|||
|
||||
for (aggno = 0; aggno < aggstate->numaggs; aggno++)
|
||||
{
|
||||
AggStatePerAgg peraggstate = &peragg[aggno];
|
||||
Aggref *aggref = peraggstate->aggref;
|
||||
Datum newVal;
|
||||
AggStatePerAgg peraggstate = &peragg[aggno];
|
||||
Aggref *aggref = peraggstate->aggref;
|
||||
Datum newVal;
|
||||
|
||||
newVal = ExecEvalExpr(aggref->target, econtext,
|
||||
&isNull, &isDone);
|
||||
|
@ -479,37 +491,37 @@ ExecAgg(Agg *node)
|
|||
|
||||
/*
|
||||
* Keep a copy of the first input tuple for the projection.
|
||||
* (We only need one since only the GROUP BY columns in it
|
||||
* can be referenced, and these will be the same for all
|
||||
* tuples aggregated over.)
|
||||
* (We only need one since only the GROUP BY columns in it can
|
||||
* be referenced, and these will be the same for all tuples
|
||||
* aggregated over.)
|
||||
*/
|
||||
if (!inputTuple)
|
||||
inputTuple = heap_copytuple(outerslot->val);
|
||||
}
|
||||
|
||||
/*
|
||||
* Done scanning input tuple group.
|
||||
* Finalize each aggregate calculation.
|
||||
* Done scanning input tuple group. Finalize each aggregate
|
||||
* calculation.
|
||||
*/
|
||||
for (aggno = 0; aggno < aggstate->numaggs; aggno++)
|
||||
{
|
||||
AggStatePerAgg peraggstate = &peragg[aggno];
|
||||
AggStatePerAgg peraggstate = &peragg[aggno];
|
||||
|
||||
finalize_aggregate(peraggstate,
|
||||
& aggvalues[aggno], & aggnulls[aggno]);
|
||||
&aggvalues[aggno], &aggnulls[aggno]);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the outerPlan is a Group node, we will reach here after each
|
||||
* group. We are not done unless the Group node is done (a little
|
||||
* ugliness here while we reach into the Group's state to find out).
|
||||
* Furthermore, when grouping we return nothing at all unless we
|
||||
* had some input tuple(s). By the nature of Group, there are
|
||||
* no empty groups, so if we get here with no input the whole scan
|
||||
* is empty.
|
||||
* ugliness here while we reach into the Group's state to find
|
||||
* out). Furthermore, when grouping we return nothing at all
|
||||
* unless we had some input tuple(s). By the nature of Group,
|
||||
* there are no empty groups, so if we get here with no input the
|
||||
* whole scan is empty.
|
||||
*
|
||||
* If the outerPlan isn't a Group, we are done when we get here,
|
||||
* and we will emit a (single) tuple even if there were no input
|
||||
* If the outerPlan isn't a Group, we are done when we get here, and
|
||||
* we will emit a (single) tuple even if there were no input
|
||||
* tuples.
|
||||
*/
|
||||
if (IsA(outerPlan, Group))
|
||||
|
@ -523,17 +535,18 @@ ExecAgg(Agg *node)
|
|||
else
|
||||
{
|
||||
aggstate->agg_done = true;
|
||||
|
||||
/*
|
||||
* If inputtuple==NULL (ie, the outerPlan didn't return anything),
|
||||
* create a dummy all-nulls input tuple for use by execProject.
|
||||
* 99.44% of the time this is a waste of cycles, because
|
||||
* ordinarily the projected output tuple's targetlist cannot
|
||||
* contain any direct (non-aggregated) references to input
|
||||
* columns, so the dummy tuple will not be referenced. However
|
||||
* there are special cases where this isn't so --- in particular
|
||||
* an UPDATE involving an aggregate will have a targetlist
|
||||
* reference to ctid. We need to return a null for ctid in that
|
||||
* situation, not coredump.
|
||||
* If inputtuple==NULL (ie, the outerPlan didn't return
|
||||
* anything), create a dummy all-nulls input tuple for use by
|
||||
* execProject. 99.44% of the time this is a waste of cycles,
|
||||
* because ordinarily the projected output tuple's targetlist
|
||||
* cannot contain any direct (non-aggregated) references to
|
||||
* input columns, so the dummy tuple will not be referenced.
|
||||
* However there are special cases where this isn't so --- in
|
||||
* particular an UPDATE involving an aggregate will have a
|
||||
* targetlist reference to ctid. We need to return a null for
|
||||
* ctid in that situation, not coredump.
|
||||
*
|
||||
* The values returned for the aggregates will be the initial
|
||||
* values of the transition functions.
|
||||
|
@ -550,7 +563,7 @@ ExecAgg(Agg *node)
|
|||
/* watch out for null input tuples, though... */
|
||||
if (tupType && tupValue)
|
||||
{
|
||||
null_array = (char *) palloc(sizeof(char)*tupType->natts);
|
||||
null_array = (char *) palloc(sizeof(char) * tupType->natts);
|
||||
for (attnum = 0; attnum < tupType->natts; attnum++)
|
||||
null_array[attnum] = 'n';
|
||||
inputTuple = heap_formtuple(tupType, tupValue, null_array);
|
||||
|
@ -571,17 +584,17 @@ ExecAgg(Agg *node)
|
|||
|
||||
/*
|
||||
* Form a projection tuple using the aggregate results and the
|
||||
* representative input tuple. Store it in the result tuple slot.
|
||||
* representative input tuple. Store it in the result tuple slot.
|
||||
*/
|
||||
resultSlot = ExecProject(projInfo, &isDone);
|
||||
|
||||
/*
|
||||
* If the completed tuple does not match the qualifications,
|
||||
* it is ignored and we loop back to try to process another group.
|
||||
* If the completed tuple does not match the qualifications, it is
|
||||
* ignored and we loop back to try to process another group.
|
||||
* Otherwise, return the tuple.
|
||||
*/
|
||||
}
|
||||
while (! ExecQual(node->plan.qual, econtext, false));
|
||||
while (!ExecQual(node->plan.qual, econtext, false));
|
||||
|
||||
return resultSlot;
|
||||
}
|
||||
|
@ -596,13 +609,13 @@ ExecAgg(Agg *node)
|
|||
bool
|
||||
ExecInitAgg(Agg *node, EState *estate, Plan *parent)
|
||||
{
|
||||
AggState *aggstate;
|
||||
AggStatePerAgg peragg;
|
||||
Plan *outerPlan;
|
||||
ExprContext *econtext;
|
||||
int numaggs,
|
||||
aggno;
|
||||
List *alist;
|
||||
AggState *aggstate;
|
||||
AggStatePerAgg peragg;
|
||||
Plan *outerPlan;
|
||||
ExprContext *econtext;
|
||||
int numaggs,
|
||||
aggno;
|
||||
List *alist;
|
||||
|
||||
/*
|
||||
* assign the node's execution state
|
||||
|
@ -620,21 +633,23 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
|
|||
* find aggregates in targetlist and quals
|
||||
*
|
||||
* Note: pull_agg_clauses also checks that no aggs contain other agg
|
||||
* calls in their arguments. This would make no sense under SQL semantics
|
||||
* anyway (and it's forbidden by the spec). Because that is true, we
|
||||
* don't need to worry about evaluating the aggs in any particular order.
|
||||
* calls in their arguments. This would make no sense under SQL
|
||||
* semantics anyway (and it's forbidden by the spec). Because that is
|
||||
* true, we don't need to worry about evaluating the aggs in any
|
||||
* particular order.
|
||||
*/
|
||||
aggstate->aggs = nconc(pull_agg_clause((Node *) node->plan.targetlist),
|
||||
pull_agg_clause((Node *) node->plan.qual));
|
||||
aggstate->numaggs = numaggs = length(aggstate->aggs);
|
||||
if (numaggs <= 0)
|
||||
{
|
||||
|
||||
/*
|
||||
* This used to be treated as an error, but we can't do that anymore
|
||||
* because constant-expression simplification could optimize away
|
||||
* all of the Aggrefs in the targetlist and qual. So, just make a
|
||||
* debug note, and force numaggs positive so that palloc()s below
|
||||
* don't choke.
|
||||
* This used to be treated as an error, but we can't do that
|
||||
* anymore because constant-expression simplification could
|
||||
* optimize away all of the Aggrefs in the targetlist and qual.
|
||||
* So, just make a debug note, and force numaggs positive so that
|
||||
* palloc()s below don't choke.
|
||||
*/
|
||||
elog(DEBUG, "ExecInitAgg: could not find any aggregate functions");
|
||||
numaggs = 1;
|
||||
|
@ -655,8 +670,8 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
|
|||
ExecInitResultTupleSlot(estate, &aggstate->csstate.cstate);
|
||||
|
||||
/*
|
||||
* Set up aggregate-result storage in the expr context,
|
||||
* and also allocate my private per-agg working storage
|
||||
* Set up aggregate-result storage in the expr context, and also
|
||||
* allocate my private per-agg working storage
|
||||
*/
|
||||
econtext = aggstate->csstate.cstate.cs_ExprContext;
|
||||
econtext->ecxt_aggvalues = (Datum *) palloc(sizeof(Datum) * numaggs);
|
||||
|
@ -693,15 +708,15 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
|
|||
aggno = -1;
|
||||
foreach(alist, aggstate->aggs)
|
||||
{
|
||||
Aggref *aggref = (Aggref *) lfirst(alist);
|
||||
AggStatePerAgg peraggstate = &peragg[++aggno];
|
||||
char *aggname = aggref->aggname;
|
||||
HeapTuple aggTuple;
|
||||
Aggref *aggref = (Aggref *) lfirst(alist);
|
||||
AggStatePerAgg peraggstate = &peragg[++aggno];
|
||||
char *aggname = aggref->aggname;
|
||||
HeapTuple aggTuple;
|
||||
Form_pg_aggregate aggform;
|
||||
Type typeInfo;
|
||||
Oid xfn1_oid,
|
||||
xfn2_oid,
|
||||
finalfn_oid;
|
||||
Type typeInfo;
|
||||
Oid xfn1_oid,
|
||||
xfn2_oid,
|
||||
finalfn_oid;
|
||||
|
||||
/* Mark Aggref node with its associated index in the result array */
|
||||
aggref->aggno = aggno;
|
||||
|
@ -762,9 +777,7 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
|
|||
}
|
||||
|
||||
if (OidIsValid(finalfn_oid))
|
||||
{
|
||||
fmgr_info(finalfn_oid, &peraggstate->finalfn);
|
||||
}
|
||||
|
||||
if (aggref->aggdistinct)
|
||||
{
|
||||
@@ -848,7 +861,7 @@ copyDatum(Datum val, int typLen, bool typByVal)
return val;
else
{
char *newVal;
char *newVal;

if (typLen == -1) /* variable length type? */
typLen = VARSIZE((struct varlena *) DatumGetPointer(val));
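A minimal sketch (not part of this commit) of the datum-copying idea in the copyDatum() hunk above: pass-by-value datums are returned as-is, pass-by-reference datums get fresh storage. It assumes the backend's Datum, palloc, VARSIZE and DatumGetPointer/PointerGetDatum definitions from postgres.h; the helper name is hypothetical:

static Datum
copy_datum_sketch(Datum val, int typLen, bool typByVal)
{
    char   *newVal;

    if (typByVal)
        return val;             /* pass-by-value: the Datum itself is the value */

    if (typLen == -1)           /* variable-length type? use its varlena size */
        typLen = VARSIZE((struct varlena *) DatumGetPointer(val));

    newVal = (char *) palloc(typLen);
    memcpy(newVal, DatumGetPointer(val), typLen);
    return PointerGetDatum(newVal);
}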
src/backend/executor/nodeAppend.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.29 2000/01/26 05:56:22 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.30 2000/04/12 17:15:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
|
@ -251,9 +251,9 @@ ExecInitAppend(Append *node, EState *estate, Plan *parent)
|
|||
|
||||
foreach(rtentryP, rtable)
|
||||
{
|
||||
RangeTblEntry *rtentry = lfirst(rtentryP);
|
||||
Oid reloid;
|
||||
RelationInfo *rri;
|
||||
RangeTblEntry *rtentry = lfirst(rtentryP);
|
||||
Oid reloid;
|
||||
RelationInfo *rri;
|
||||
|
||||
reloid = rtentry->relid;
|
||||
rri = makeNode(RelationInfo);
|
||||
|
@ -304,6 +304,7 @@ ExecInitAppend(Append *node, EState *estate, Plan *parent)
|
|||
{
|
||||
JunkFilter *j = ExecInitJunkFilter(initNode->targetlist,
|
||||
ExecGetTupType(initNode));
|
||||
|
||||
junkList = lappend(junkList, j);
|
||||
}
|
||||
|
||||
src/backend/executor/nodeGroup.c
@@ -15,7 +15,7 @@
* locate group boundaries.
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.33 2000/01/27 18:11:27 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.34 2000/04/12 17:15:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
|
@ -97,8 +97,9 @@ ExecGroupEveryTuple(Group *node)
|
|||
{
|
||||
grpstate->grp_useFirstTuple = FALSE;
|
||||
|
||||
/* note we rely on subplan to hold ownership of the tuple
|
||||
* for as long as we need it; we don't copy it.
|
||||
/*
|
||||
* note we rely on subplan to hold ownership of the tuple for as
|
||||
* long as we need it; we don't copy it.
|
||||
*/
|
||||
ExecStoreTuple(grpstate->grp_firstTuple,
|
||||
grpstate->csstate.css_ScanTupleSlot,
|
||||
|
@ -122,17 +123,20 @@ ExecGroupEveryTuple(Group *node)
|
|||
}
|
||||
else
|
||||
{
|
||||
|
||||
/*
|
||||
* Compare with first tuple and see if this tuple is of the
|
||||
* same group.
|
||||
*/
|
||||
if (! execTuplesMatch(firsttuple, outerTuple,
|
||||
tupdesc,
|
||||
node->numCols, node->grpColIdx,
|
||||
grpstate->eqfunctions))
|
||||
if (!execTuplesMatch(firsttuple, outerTuple,
|
||||
tupdesc,
|
||||
node->numCols, node->grpColIdx,
|
||||
grpstate->eqfunctions))
|
||||
{
|
||||
|
||||
/*
|
||||
* No; save the tuple to return it next time, and return NULL
|
||||
* No; save the tuple to return it next time, and return
|
||||
* NULL
|
||||
*/
|
||||
grpstate->grp_useFirstTuple = TRUE;
|
||||
heap_freetuple(firsttuple);
|
||||
|
@ -142,8 +146,9 @@ ExecGroupEveryTuple(Group *node)
|
|||
}
|
||||
}
|
||||
|
||||
/* note we rely on subplan to hold ownership of the tuple
|
||||
* for as long as we need it; we don't copy it.
|
||||
/*
|
||||
* note we rely on subplan to hold ownership of the tuple for as
|
||||
* long as we need it; we don't copy it.
|
||||
*/
|
||||
ExecStoreTuple(outerTuple,
|
||||
grpstate->csstate.css_ScanTupleSlot,
|
||||
|
@ -227,13 +232,13 @@ ExecGroupOneTuple(Group *node)
|
|||
outerTuple = outerslot->val;
|
||||
|
||||
/*
|
||||
* Compare with first tuple and see if this tuple is of the
|
||||
* same group.
|
||||
* Compare with first tuple and see if this tuple is of the same
|
||||
* group.
|
||||
*/
|
||||
if (! execTuplesMatch(firsttuple, outerTuple,
|
||||
tupdesc,
|
||||
node->numCols, node->grpColIdx,
|
||||
grpstate->eqfunctions))
|
||||
if (!execTuplesMatch(firsttuple, outerTuple,
|
||||
tupdesc,
|
||||
node->numCols, node->grpColIdx,
|
||||
grpstate->eqfunctions))
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -244,8 +249,9 @@ ExecGroupOneTuple(Group *node)
|
|||
*/
|
||||
projInfo = grpstate->csstate.cstate.cs_ProjInfo;
|
||||
|
||||
/* note we rely on subplan to hold ownership of the tuple
|
||||
* for as long as we need it; we don't copy it.
|
||||
/*
|
||||
* note we rely on subplan to hold ownership of the tuple for as long
|
||||
* as we need it; we don't copy it.
|
||||
*/
|
||||
ExecStoreTuple(firsttuple,
|
||||
grpstate->csstate.css_ScanTupleSlot,
|
||||
|
@ -418,7 +424,7 @@ execTuplesMatch(HeapTuple tuple1,
|
|||
* start comparing at the last field (least significant sort key).
|
||||
* That's the most likely to be different...
|
||||
*/
|
||||
for (i = numCols; --i >= 0; )
|
||||
for (i = numCols; --i >= 0;)
|
||||
{
|
||||
AttrNumber att = matchColIdx[i];
|
||||
Datum attr1,
|
||||
|
@ -445,7 +451,7 @@ execTuplesMatch(HeapTuple tuple1,
|
|||
|
||||
/* Apply the type-specific equality function */
|
||||
|
||||
equal = (Datum) (*fmgr_faddr(& eqfunctions[i])) (attr1, attr2);
|
||||
equal = (Datum) (*fmgr_faddr(&eqfunctions[i])) (attr1, attr2);
|
||||
|
||||
if (DatumGetInt32(equal) == 0)
|
||||
return FALSE;
|
||||
|
@ -459,7 +465,7 @@ execTuplesMatch(HeapTuple tuple1,
|
|||
* Look up the equality functions needed for execTuplesMatch.
|
||||
* The result is a palloc'd array.
|
||||
*/
|
||||
FmgrInfo *
|
||||
FmgrInfo *
|
||||
execTuplesMatchPrepare(TupleDesc tupdesc,
|
||||
int numCols,
|
||||
AttrNumber *matchColIdx)
|
||||
|
@ -481,7 +487,7 @@ execTuplesMatchPrepare(TupleDesc tupdesc,
|
|||
typeidTypeName(typid));
|
||||
}
|
||||
pgopform = (Form_pg_operator) GETSTRUCT(eq_operator);
|
||||
fmgr_info(pgopform->oprcode, & eqfunctions[i]);
|
||||
fmgr_info(pgopform->oprcode, &eqfunctions[i]);
|
||||
}
|
||||
|
||||
return eqfunctions;
|
||||
|
src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.48 2000/04/07 00:30:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.49 2000/04/12 17:15:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
|
@ -106,7 +106,7 @@ IndexNext(IndexScan *node)
|
|||
direction = BackwardScanDirection;
|
||||
else if (ScanDirectionIsBackward(direction))
|
||||
direction = ForwardScanDirection;
|
||||
}
|
||||
}
|
||||
snapshot = estate->es_snapshot;
|
||||
scanstate = node->scan.scanstate;
|
||||
indexstate = node->indxstate;
|
||||
|
@ -195,11 +195,11 @@ IndexNext(IndexScan *node)
|
|||
List *qual;
|
||||
|
||||
/*
|
||||
* store the scanned tuple in the scan tuple slot of
|
||||
* the scan state. Eventually we will only do this and not
|
||||
* return a tuple. Note: we pass 'false' because tuples
|
||||
* returned by amgetnext are pointers onto disk pages and
|
||||
* must not be pfree()'d.
|
||||
* store the scanned tuple in the scan tuple slot of the
|
||||
* scan state. Eventually we will only do this and not
|
||||
* return a tuple. Note: we pass 'false' because tuples
|
||||
* returned by amgetnext are pointers onto disk pages and
|
||||
* must not be pfree()'d.
|
||||
*/
|
||||
ExecStoreTuple(tuple, /* tuple to store */
|
||||
slot, /* slot to store in */
|
||||
|
@ -208,16 +208,17 @@ IndexNext(IndexScan *node)
|
|||
|
||||
/*
|
||||
* At this point we have an extra pin on the buffer,
|
||||
* because ExecStoreTuple incremented the pin count.
|
||||
* Drop our local pin.
|
||||
* because ExecStoreTuple incremented the pin count. Drop
|
||||
* our local pin.
|
||||
*/
|
||||
ReleaseBuffer(buffer);
|
||||
|
||||
/*
|
||||
* We must check to see if the current tuple was already
|
||||
* matched by an earlier index, so we don't double-report it.
|
||||
* We do this by passing the tuple through ExecQual and
|
||||
* checking for failure with all previous qualifications.
|
||||
* matched by an earlier index, so we don't double-report
|
||||
* it. We do this by passing the tuple through ExecQual
|
||||
* and checking for failure with all previous
|
||||
* qualifications.
|
||||
*/
|
||||
scanstate->cstate.cs_ExprContext->ecxt_scantuple = slot;
|
||||
qual = node->indxqualorig;
|
||||
|
@ -234,7 +235,7 @@ IndexNext(IndexScan *node)
|
|||
qual = lnext(qual);
|
||||
}
|
||||
if (!prev_matches)
|
||||
return slot; /* OK to return tuple */
|
||||
return slot;/* OK to return tuple */
|
||||
/* Duplicate tuple, so drop it and loop back for another */
|
||||
ExecClearTuple(slot);
|
||||
}
|
||||
|
@ -380,13 +381,14 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan *parent)
|
|||
scanexpr = (run_keys[j] == RIGHT_OP) ?
|
||||
(Node *) get_rightop(clause) :
|
||||
(Node *) get_leftop(clause);
|
||||
|
||||
/*
|
||||
* pass in isDone but ignore it. We don't iterate in
|
||||
* quals
|
||||
*/
|
||||
scanvalue = (Datum)
|
||||
ExecEvalExpr(scanexpr,
|
||||
node->scan.scanstate->cstate.cs_ExprContext,
|
||||
node->scan.scanstate->cstate.cs_ExprContext,
|
||||
&isNull, &isDone);
|
||||
scan_keys[j].sk_argument = scanvalue;
|
||||
if (isNull)
|
||||
|
@ -750,7 +752,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
|
|||
clause = nth(j, qual);
|
||||
|
||||
op = (Oper *) clause->oper;
|
||||
if (!IsA(clause, Expr) || !IsA(op, Oper))
|
||||
if (!IsA(clause, Expr) ||!IsA(op, Oper))
|
||||
elog(ERROR, "ExecInitIndexScan: indxqual not an opclause!");
|
||||
|
||||
opid = op->opid;
|
||||
|
@ -801,7 +803,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
|
|||
|
||||
Assert(leftop != NULL);
|
||||
|
||||
if (IsA(leftop, Var) && var_is_rel((Var *) leftop))
|
||||
if (IsA(leftop, Var) &&var_is_rel((Var *) leftop))
|
||||
{
|
||||
/* ----------------
|
||||
* if the leftop is a "rel-var", then it means
|
||||
|
@@ -884,7 +886,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
|
|||
|
||||
Assert(rightop != NULL);
|
||||
|
||||
if (IsA(rightop, Var) && var_is_rel((Var *) rightop))
|
||||
if (IsA(rightop, Var) &&var_is_rel((Var *) rightop))
|
||||
{
|
||||
/* ----------------
|
||||
* here we make sure only one op identifies the
|
||||
|
@@ -1049,10 +1051,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
|
|||
&currentRelation,	/* return: rel desc */
|
||||
(Pointer *) ¤tScanDesc); /* return: scan desc */
|
||||
|
||||
if (!RelationGetForm(currentRelation)->relhasindex)
|
||||
{
|
||||
elog(ERROR, "indexes of the relation %u was inactivated", reloid);
|
||||
}
|
||||
if (!RelationGetForm(currentRelation)->relhasindex)
|
||||
elog(ERROR, "indexes of the relation %u was inactivated", reloid);
|
||||
scanstate->css_currentRelation = currentRelation;
|
||||
scanstate->css_currentScanDesc = currentScanDesc;
|
||||
|
||||
|
|
|
@@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSort.c,v 1.25 2000/01/26 05:56:23 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSort.c,v 1.26 2000/04/12 17:15:09 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -126,7 +126,7 @@ ExecSort(Sort *node)
|
|||
* ----------------
|
||||
*/
|
||||
|
||||
if (! sortstate->sort_Done)
|
||||
if (!sortstate->sort_Done)
|
||||
{
|
||||
Plan *outerNode;
|
||||
TupleDesc tupDesc;
|
||||
|
@@ -156,7 +156,7 @@ ExecSort(Sort *node)
|
|||
sortkeys = (ScanKey) sortstate->sort_Keys;
|
||||
|
||||
tuplesortstate = tuplesort_begin_heap(tupDesc, keycount, sortkeys,
|
||||
true /* randomAccess */);
|
||||
true /* randomAccess */ );
|
||||
|
||||
sortstate->tuplesortstate = (void *) tuplesortstate;
|
||||
|
||||
|
@@ -371,7 +371,7 @@ ExecSortMarkPos(Sort *node)
|
|||
* if we haven't sorted yet, just return
|
||||
* ----------------
|
||||
*/
|
||||
if (! sortstate->sort_Done)
|
||||
if (!sortstate->sort_Done)
|
||||
return;
|
||||
|
||||
tuplesort_markpos((Tuplesortstate *) sortstate->tuplesortstate);
|
||||
|
@@ -392,7 +392,7 @@ ExecSortRestrPos(Sort *node)
|
|||
* if we haven't sorted yet, just return.
|
||||
* ----------------
|
||||
*/
|
||||
if (! sortstate->sort_Done)
|
||||
if (!sortstate->sort_Done)
|
||||
return;
|
||||
|
||||
/* ----------------
|
||||
|
@@ -412,14 +412,14 @@ ExecReScanSort(Sort *node, ExprContext *exprCtxt, Plan *parent)
|
|||
* not NULL then it will be re-scanned by ExecProcNode, else - no
|
||||
* reason to re-scan it at all.
|
||||
*/
|
||||
if (! sortstate->sort_Done)
|
||||
if (!sortstate->sort_Done)
|
||||
return;
|
||||
|
||||
ExecClearTuple(sortstate->csstate.cstate.cs_ResultTupleSlot);
|
||||
|
||||
/*
|
||||
* If subnode is to be rescanned then we forget previous sort
|
||||
* results; we have to re-read the subplan and re-sort.
|
||||
* If subnode is to be rescanned then we forget previous sort results;
|
||||
* we have to re-read the subplan and re-sort.
|
||||
*
|
||||
* Otherwise we can just rewind and rescan the sorted output.
|
||||
*/
|
||||
|
@@ -430,7 +430,5 @@ ExecReScanSort(Sort *node, ExprContext *exprCtxt, Plan *parent)
|
|||
sortstate->tuplesortstate = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
tuplesort_rescan((Tuplesortstate *) sortstate->tuplesortstate);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -7,7 +7,7 @@
|
|||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.24 2000/03/23 07:32:58 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.25 2000/04/12 17:15:10 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -67,20 +67,20 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
|
|||
ExecReScan(plan, (ExprContext *) NULL, plan);
|
||||
|
||||
/*
|
||||
* For all sublink types except EXPR_SUBLINK, the result is boolean
|
||||
* as are the results of the combining operators. We combine results
|
||||
* For all sublink types except EXPR_SUBLINK, the result is boolean as
|
||||
* are the results of the combining operators. We combine results
|
||||
* within a tuple (if there are multiple columns) using OR semantics
|
||||
* if "useor" is true, AND semantics if not. We then combine results
|
||||
* across tuples (if the subplan produces more than one) using OR
|
||||
* semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK.
|
||||
* (MULTIEXPR_SUBLINK doesn't allow multiple tuples from the subplan.)
|
||||
* NULL results from the combining operators are handled according to
|
||||
* the usual SQL semantics for OR and AND. The result for no input
|
||||
* the usual SQL semantics for OR and AND. The result for no input
|
||||
* tuples is FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
|
||||
* MULTIEXPR_SUBLINK.
|
||||
*
|
||||
* For EXPR_SUBLINK we require the subplan to produce no more than one
|
||||
* tuple, else an error is raised. If zero tuples are produced, we
|
||||
* tuple, else an error is raised. If zero tuples are produced, we
|
||||
* return NULL. Assuming we get a tuple, we just return its first
|
||||
* column (there can be only one non-junk column in this case).
|
||||
*/
|
||||
|
@@ -106,13 +106,14 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
|
|||
if (found)
|
||||
elog(ERROR, "More than one tuple returned by a subselect used as an expression.");
|
||||
found = true;
|
||||
|
||||
/*
|
||||
* We need to copy the subplan's tuple in case the result is of
|
||||
* pass-by-ref type --- our return value will point into this
|
||||
* copied tuple! Can't use the subplan's instance of the tuple
|
||||
* since it won't still be valid after next ExecProcNode() call.
|
||||
* node->curTuple keeps track of the copied tuple for eventual
|
||||
* freeing.
|
||||
* We need to copy the subplan's tuple in case the result is
|
||||
* of pass-by-ref type --- our return value will point into
|
||||
* this copied tuple! Can't use the subplan's instance of the
|
||||
* tuple since it won't still be valid after next
|
||||
* ExecProcNode() call. node->curTuple keeps track of the
|
||||
* copied tuple for eventual freeing.
|
||||
*/
|
||||
tup = heap_copytuple(tup);
|
||||
if (node->curTuple)
|
||||
|
@@ -129,7 +130,8 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
|
|||
|
||||
found = true;
|
||||
|
||||
/* For ALL, ANY, and MULTIEXPR sublinks, iterate over combining
|
||||
/*
|
||||
* For ALL, ANY, and MULTIEXPR sublinks, iterate over combining
|
||||
* operators for columns of tuple.
|
||||
*/
|
||||
foreach(lst, sublink->oper)
|
||||
|
@@ -140,14 +142,14 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
|
|||
bool expnull;
|
||||
|
||||
/*
|
||||
* The righthand side of the expression should be either a Const
|
||||
* or a function call or RelabelType node taking a Const as arg
|
||||
* (these nodes represent run-time type coercions inserted by
|
||||
* the parser to get to the input type needed by the operator).
|
||||
* Find the Const node and insert the actual righthand-side value
|
||||
* into it.
|
||||
* The righthand side of the expression should be either a
|
||||
* Const or a function call or RelabelType node taking a Const
|
||||
* as arg (these nodes represent run-time type coercions
|
||||
* inserted by the parser to get to the input type needed by
|
||||
* the operator). Find the Const node and insert the actual
|
||||
* righthand-side value into it.
|
||||
*/
|
||||
if (! IsA(con, Const))
|
||||
if (!IsA(con, Const))
|
||||
{
|
||||
switch (con->type)
|
||||
{
|
||||
|
@@ -161,16 +163,18 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
|
|||
/* will fail below */
|
||||
break;
|
||||
}
|
||||
if (! IsA(con, Const))
|
||||
if (!IsA(con, Const))
|
||||
elog(ERROR, "ExecSubPlan: failed to find placeholder for subplan result");
|
||||
}
|
||||
con->constvalue = heap_getattr(tup, col, tdesc,
|
||||
&(con->constisnull));
|
||||
|
||||
/*
|
||||
* Now we can eval the combining operator for this column.
|
||||
*/
|
||||
expresult = ExecEvalExpr((Node *) expr, econtext, &expnull,
|
||||
(bool *) NULL);
|
||||
|
||||
/*
|
||||
* Combine the result into the row result as appropriate.
|
||||
*/
|
||||
|
@@ -240,14 +244,16 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
|
|||
|
||||
if (!found)
|
||||
{
|
||||
/* deal with empty subplan result. result/isNull were previously
|
||||
|
||||
/*
|
||||
* deal with empty subplan result. result/isNull were previously
|
||||
* initialized correctly for all sublink types except EXPR and
|
||||
* MULTIEXPR; for those, return NULL.
|
||||
*/
|
||||
if (subLinkType == EXPR_SUBLINK || subLinkType == MULTIEXPR_SUBLINK)
|
||||
{
|
||||
result = (Datum) false;
|
||||
*isNull = true;
|
||||
result = (Datum) false;
|
||||
*isNull = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -354,9 +360,9 @@ ExecSetParamPlan(SubPlan *node)
|
|||
|
||||
/*
|
||||
* We need to copy the subplan's tuple in case any of the params
|
||||
* are pass-by-ref type --- the pointers stored in the param structs
|
||||
* will point at this copied tuple! node->curTuple keeps track
|
||||
* of the copied tuple for eventual freeing.
|
||||
* are pass-by-ref type --- the pointers stored in the param
|
||||
* structs will point at this copied tuple! node->curTuple keeps
|
||||
* track of the copied tuple for eventual freeing.
|
||||
*/
|
||||
tup = heap_copytuple(tup);
|
||||
if (node->curTuple)
|
||||
|
|
|
@@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeTidscan.c,v 1.5 2000/04/07 00:30:41 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeTidscan.c,v 1.6 2000/04/12 17:15:10 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -32,21 +32,21 @@
|
|||
#include "access/heapam.h"
|
||||
#include "parser/parsetree.h"
|
||||
|
||||
static int TidListCreate(List *, ExprContext *, ItemPointer *);
|
||||
static int TidListCreate(List *, ExprContext *, ItemPointer *);
|
||||
static TupleTableSlot *TidNext(TidScan *node);
|
||||
|
||||
static int
|
||||
TidListCreate(List *evalList, ExprContext *econtext, ItemPointer *tidList)
|
||||
{
|
||||
List *lst;
|
||||
ItemPointer itemptr;
|
||||
List *lst;
|
||||
ItemPointer itemptr;
|
||||
bool isNull;
|
||||
int numTids = 0;
|
||||
int numTids = 0;
|
||||
|
||||
foreach (lst, evalList)
|
||||
foreach(lst, evalList)
|
||||
{
|
||||
itemptr = (ItemPointer)ExecEvalExpr(lfirst(lst), econtext,
|
||||
&isNull, (bool *)0);
|
||||
itemptr = (ItemPointer) ExecEvalExpr(lfirst(lst), econtext,
|
||||
&isNull, (bool *) 0);
|
||||
if (itemptr && ItemPointerIsValid(itemptr))
|
||||
{
|
||||
tidList[numTids] = itemptr;
|
||||
|
@@ -67,20 +67,21 @@ TidListCreate(List *evalList, ExprContext *econtext, ItemPointer *tidList)
|
|||
static TupleTableSlot *
|
||||
TidNext(TidScan *node)
|
||||
{
|
||||
EState *estate;
|
||||
EState *estate;
|
||||
CommonScanState *scanstate;
|
||||
TidScanState *tidstate;
|
||||
ScanDirection direction;
|
||||
TidScanState *tidstate;
|
||||
ScanDirection direction;
|
||||
Snapshot snapshot;
|
||||
Relation heapRelation;
|
||||
HeapTuple tuple;
|
||||
TupleTableSlot *slot;
|
||||
TupleTableSlot *slot;
|
||||
Buffer buffer = InvalidBuffer;
|
||||
int numTids;
|
||||
int numTids;
|
||||
|
||||
bool bBackward;
|
||||
int tidNumber;
|
||||
ItemPointer *tidList, itemptr;
|
||||
int tidNumber;
|
||||
ItemPointer *tidList,
|
||||
itemptr;
|
||||
|
||||
/* ----------------
|
||||
* extract necessary information from tid scan node
|
||||
|
@@ -108,7 +109,7 @@ TidNext(TidScan *node)
|
|||
ExecClearTuple(slot);
|
||||
if (estate->es_evTupleNull[node->scan.scanrelid - 1])
|
||||
return slot; /* return empty slot */
|
||||
|
||||
|
||||
/* probably ought to use ExecStoreTuple here... */
|
||||
slot->val = estate->es_evTuple[node->scan.scanrelid - 1];
|
||||
slot->ttc_shouldFree = false;
|
||||
|
@@ -159,7 +160,7 @@ TidNext(TidScan *node)
|
|||
if (tuple->t_data != NULL)
|
||||
{
|
||||
bool prev_matches = false;
|
||||
int prev_tid;
|
||||
int prev_tid;
|
||||
|
||||
/* ----------------
|
||||
* store the scanned tuple in the scan tuple slot of
|
||||
|
@@ -169,23 +170,23 @@ TidNext(TidScan *node)
|
|||
* were not created with palloc() and so should not be pfree()'d.
|
||||
* ----------------
|
||||
*/
|
||||
ExecStoreTuple(tuple, /* tuple to store */
|
||||
slot, /* slot to store in */
|
||||
buffer, /* buffer associated with tuple */
|
||||
false); /* don't pfree */
|
||||
ExecStoreTuple(tuple, /* tuple to store */
|
||||
slot,/* slot to store in */
|
||||
buffer, /* buffer associated with tuple */
|
||||
false); /* don't pfree */
|
||||
|
||||
/*
|
||||
* At this point we have an extra pin on the buffer,
|
||||
* because ExecStoreTuple incremented the pin count.
|
||||
* Drop our local pin.
|
||||
*/
|
||||
ReleaseBuffer(buffer);
|
||||
* At this point we have an extra pin on the buffer, because
|
||||
* ExecStoreTuple incremented the pin count. Drop our local
|
||||
* pin.
|
||||
*/
|
||||
ReleaseBuffer(buffer);
|
||||
|
||||
/*
|
||||
* We must check to see if the current tuple would have
|
||||
* been matched by an earlier tid, so we don't double
|
||||
* report it. We do this by passing the tuple through
|
||||
* ExecQual and look for failure with all previous
|
||||
* qualifications.
|
||||
* We must check to see if the current tuple would have been
|
||||
* matched by an earlier tid, so we don't double report it. We
|
||||
* do this by passing the tuple through ExecQual and look for
|
||||
* failure with all previous qualifications.
|
||||
*/
|
||||
for (prev_tid = 0; prev_tid < tidstate->tss_TidPtr;
|
||||
prev_tid++)
|
||||
|
@@ -209,7 +210,7 @@ TidNext(TidScan *node)
|
|||
else
|
||||
tidstate->tss_TidPtr++;
|
||||
if (slot_is_valid)
|
||||
return slot;
|
||||
return slot;
|
||||
}
|
||||
/* ----------------
|
||||
* if we get here it means the tid scan failed so we
|
||||
|
@@ -255,9 +256,9 @@ ExecTidScan(TidScan *node)
|
|||
void
|
||||
ExecTidReScan(TidScan *node, ExprContext *exprCtxt, Plan *parent)
|
||||
{
|
||||
EState *estate;
|
||||
TidScanState *tidstate;
|
||||
ItemPointer *tidList;
|
||||
EState *estate;
|
||||
TidScanState *tidstate;
|
||||
ItemPointer *tidList;
|
||||
|
||||
tidstate = node->tidstate;
|
||||
estate = node->scan.plan.state;
|
||||
|
@@ -278,7 +279,7 @@ ExecTidReScan(TidScan *node, ExprContext *exprCtxt, Plan *parent)
|
|||
}
|
||||
|
||||
tidstate->tss_NumTids = TidListCreate(node->tideval,
|
||||
node->scan.scanstate->cstate.cs_ExprContext,
|
||||
node->scan.scanstate->cstate.cs_ExprContext,
|
||||
tidList);
|
||||
|
||||
/* ----------------
|
||||
|
@@ -299,7 +300,7 @@ void
|
|||
ExecEndTidScan(TidScan *node)
|
||||
{
|
||||
CommonScanState *scanstate;
|
||||
TidScanState *tidstate;
|
||||
TidScanState *tidstate;
|
||||
|
||||
scanstate = node->scan.scanstate;
|
||||
tidstate = node->tidstate;
|
||||
|
@@ -385,18 +386,18 @@ ExecTidRestrPos(TidScan *node)
|
|||
bool
|
||||
ExecInitTidScan(TidScan *node, EState *estate, Plan *parent)
|
||||
{
|
||||
TidScanState *tidstate;
|
||||
TidScanState *tidstate;
|
||||
CommonScanState *scanstate;
|
||||
ItemPointer *tidList;
|
||||
int numTids;
|
||||
int tidPtr;
|
||||
List *rangeTable;
|
||||
RangeTblEntry *rtentry;
|
||||
Oid relid;
|
||||
Oid reloid;
|
||||
ItemPointer *tidList;
|
||||
int numTids;
|
||||
int tidPtr;
|
||||
List *rangeTable;
|
||||
RangeTblEntry *rtentry;
|
||||
Oid relid;
|
||||
Oid reloid;
|
||||
|
||||
Relation currentRelation;
|
||||
int baseid;
|
||||
int baseid;
|
||||
|
||||
List *execParam = NULL;
|
||||
|
||||
|
@@ -473,7 +474,7 @@ ExecInitTidScan(TidScan *node, EState *estate, Plan *parent)
|
|||
* get the tid node information
|
||||
* ----------------
|
||||
*/
|
||||
tidList = (ItemPointer *)palloc(length(node->tideval) * sizeof(ItemPointer));
|
||||
tidList = (ItemPointer *) palloc(length(node->tideval) * sizeof(ItemPointer));
|
||||
numTids = 0;
|
||||
if (!node->needRescan)
|
||||
numTids = TidListCreate(node->tideval, scanstate->cstate.cs_ExprContext, tidList);
|
||||
|
@@ -502,8 +503,8 @@ ExecInitTidScan(TidScan *node, EState *estate, Plan *parent)
|
|||
reloid = rtentry->relid;
|
||||
|
||||
currentRelation = heap_open(reloid, AccessShareLock);
|
||||
if (currentRelation == NULL)
|
||||
elog(ERROR, "ExecInitTidScan heap_open failed.");
|
||||
if (currentRelation == NULL)
|
||||
elog(ERROR, "ExecInitTidScan heap_open failed.");
|
||||
scanstate->css_currentRelation = currentRelation;
|
||||
scanstate->css_currentScanDesc = 0;
|
||||
|
||||
|
|
|
@@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.27 2000/01/27 18:11:27 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.28 2000/04/12 17:15:10 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -55,7 +55,7 @@ ExecUnique(Unique *node)
|
|||
uniquestate = node->uniquestate;
|
||||
outerPlan = outerPlan((Plan *) node);
|
||||
resultTupleSlot = uniquestate->cstate.cs_ResultTupleSlot;
|
||||
tupDesc = ExecGetResultType(& uniquestate->cstate);
|
||||
tupDesc = ExecGetResultType(&uniquestate->cstate);
|
||||
|
||||
/* ----------------
|
||||
* now loop, returning only non-duplicate tuples.
|
||||
|
@@ -86,16 +86,16 @@ ExecUnique(Unique *node)
|
|||
* another new tuple from the subplan.
|
||||
* ----------------
|
||||
*/
|
||||
if (! execTuplesMatch(slot->val, uniquestate->priorTuple,
|
||||
tupDesc,
|
||||
node->numCols, node->uniqColIdx,
|
||||
uniquestate->eqfunctions))
|
||||
if (!execTuplesMatch(slot->val, uniquestate->priorTuple,
|
||||
tupDesc,
|
||||
node->numCols, node->uniqColIdx,
|
||||
uniquestate->eqfunctions))
|
||||
break;
|
||||
}
|
||||
|
||||
/* ----------------
|
||||
* We have a new tuple different from the previous saved tuple (if any).
|
||||
* Save it and return it. Note that we make two copies of the tuple:
|
||||
* Save it and return it. Note that we make two copies of the tuple:
|
||||
* one to keep for our own future comparisons, and one to return to the
|
||||
* caller. We need to copy the tuple returned by the subplan to avoid
|
||||
* holding buffer refcounts, and we need our own copy because the caller
|
||||
|
@@ -151,14 +151,14 @@ ExecInitUnique(Unique *node, EState *estate, Plan *parent)
|
|||
* they never call ExecQual or ExecTargetList.
|
||||
* ----------------
|
||||
*/
|
||||
ExecAssignNodeBaseInfo(estate, & uniquestate->cstate, parent);
|
||||
ExecAssignNodeBaseInfo(estate, &uniquestate->cstate, parent);
|
||||
|
||||
#define UNIQUE_NSLOTS 1
|
||||
/* ------------
|
||||
* Tuple table initialization
|
||||
* ------------
|
||||
*/
|
||||
ExecInitResultTupleSlot(estate, & uniquestate->cstate);
|
||||
ExecInitResultTupleSlot(estate, &uniquestate->cstate);
|
||||
|
||||
/* ----------------
|
||||
* then initialize outer plan
|
||||
|
@@ -172,14 +172,14 @@ ExecInitUnique(Unique *node, EState *estate, Plan *parent)
|
|||
* projection info for this node appropriately
|
||||
* ----------------
|
||||
*/
|
||||
ExecAssignResultTypeFromOuterPlan((Plan *) node, & uniquestate->cstate);
|
||||
ExecAssignResultTypeFromOuterPlan((Plan *) node, &uniquestate->cstate);
|
||||
uniquestate->cstate.cs_ProjInfo = NULL;
|
||||
|
||||
/*
|
||||
* Precompute fmgr lookup data for inner loop
|
||||
*/
|
||||
uniquestate->eqfunctions =
|
||||
execTuplesMatchPrepare(ExecGetResultType(& uniquestate->cstate),
|
||||
execTuplesMatchPrepare(ExecGetResultType(&uniquestate->cstate),
|
||||
node->numCols,
|
||||
node->uniqColIdx);
|
||||
|
||||
|
|
|
@@ -10,7 +10,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/lib/dllist.c,v 1.16 2000/01/26 05:56:26 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/lib/dllist.c,v 1.17 2000/04/12 17:15:10 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -128,14 +128,16 @@ DLRemove(Dlelem *e)
|
|||
|
||||
if (e->dle_prev)
|
||||
e->dle_prev->dle_next = e->dle_next;
|
||||
else /* must be the head element */
|
||||
else
|
||||
/* must be the head element */
|
||||
{
|
||||
Assert(e == l->dll_head);
|
||||
l->dll_head = e->dle_next;
|
||||
}
|
||||
if (e->dle_next)
|
||||
e->dle_next->dle_prev = e->dle_prev;
|
||||
else /* must be the tail element */
|
||||
else
|
||||
/* must be the tail element */
|
||||
{
|
||||
Assert(e == l->dll_tail);
|
||||
l->dll_tail = e->dle_prev;
|
||||
|
@@ -236,7 +238,8 @@ DLMoveToFront(Dlelem *e)
|
|||
|
||||
if (e->dle_next)
|
||||
e->dle_next->dle_prev = e->dle_prev;
|
||||
else /* must be the tail element */
|
||||
else
|
||||
/* must be the tail element */
|
||||
{
|
||||
Assert(e == l->dll_tail);
|
||||
l->dll_tail = e->dle_prev;
|
||||
|
|
|
@@ -9,7 +9,7 @@
|
|||
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Id: stringinfo.c,v 1.24 2000/01/26 05:56:26 momjian Exp $
|
||||
* $Id: stringinfo.c,v 1.25 2000/04/12 17:15:11 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -85,7 +85,7 @@ enlargeStringInfo(StringInfo str, int needed)
|
|||
str->data = (char *) repalloc(str->data, newlen);
|
||||
if (str->data == NULL)
|
||||
elog(ERROR,
|
||||
"enlargeStringInfo: Out of memory (%d bytes requested)", newlen);
|
||||
"enlargeStringInfo: Out of memory (%d bytes requested)", newlen);
|
||||
|
||||
str->maxlen = newlen;
|
||||
}
|
||||
|
@@ -122,12 +122,13 @@ appendStringInfo(StringInfo str, const char *fmt,...)
|
|||
nprinted = vsnprintf(str->data + str->len, avail,
|
||||
fmt, args);
|
||||
va_end(args);
|
||||
|
||||
/*
|
||||
* Note: some versions of vsnprintf return the number of chars
|
||||
* actually stored, but at least one returns -1 on failure.
|
||||
* Be conservative about believing whether the print worked.
|
||||
* actually stored, but at least one returns -1 on failure. Be
|
||||
* conservative about believing whether the print worked.
|
||||
*/
|
||||
if (nprinted >= 0 && nprinted < avail-1)
|
||||
if (nprinted >= 0 && nprinted < avail - 1)
|
||||
{
|
||||
/* Success. Note nprinted does not include trailing null. */
|
||||
str->len += nprinted;
|
||||
|
|
|
@@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.43 2000/01/26 05:56:28 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.44 2000/04/12 17:15:13 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -48,7 +48,7 @@ static int pg_passwordv0_recvauth(void *arg, PacketLen len, void *pkt);
|
|||
static int checkPassword(Port *port, char *user, char *password);
|
||||
static int old_be_recvauth(Port *port);
|
||||
static int map_old_to_new(Port *port, UserAuth old, int status);
|
||||
static void auth_failed(Port *port);
|
||||
static void auth_failed(Port *port);
|
||||
|
||||
|
||||
#ifdef KRB4
|
||||
|
@@ -130,7 +130,7 @@ static int
|
|||
pg_krb4_recvauth(Port *port)
|
||||
{
|
||||
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
|
||||
"pg_krb4_recvauth: Kerberos not implemented on this server.\n");
|
||||
"pg_krb4_recvauth: Kerberos not implemented on this server.\n");
|
||||
fputs(PQerrormsg, stderr);
|
||||
pqdebug("%s", PQerrormsg);
|
||||
|
||||
|
@@ -455,7 +455,7 @@ be_recvauth(Port *port)
|
|||
* an error message into the postmaster logfile if it failed.
|
||||
*/
|
||||
|
||||
if (hba_getauthmethod(port) != STATUS_OK)
|
||||
if (hba_getauthmethod(port) != STATUS_OK)
|
||||
PacketSendError(&port->pktInfo,
|
||||
"Missing or erroneous pg_hba.conf file, see postmaster log for details");
|
||||
|
||||
|
@@ -472,6 +472,7 @@ be_recvauth(Port *port)
|
|||
|
||||
AuthRequest areq = AUTH_REQ_OK;
|
||||
PacketDoneProc auth_handler = NULL;
|
||||
|
||||
switch (port->auth_method)
|
||||
{
|
||||
case uaReject:
|
||||
|
|
|
@@ -7,7 +7,7 @@
|
|||
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Id: be-dumpdata.c,v 1.32 2000/01/26 05:56:28 momjian Exp $
|
||||
* $Id: be-dumpdata.c,v 1.33 2000/04/12 17:15:14 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -184,7 +184,7 @@ be_typeinit(PortalEntry *entry,
|
|||
for (i = 0; i < natts; ++i)
|
||||
{
|
||||
strncpy(group->types[i].name,
|
||||
NameStr(attrs[i]->attname), NAMEDATALEN);
|
||||
NameStr(attrs[i]->attname), NAMEDATALEN);
|
||||
group->types[i].typid = attrs[i]->atttypid;
|
||||
group->types[i].typlen = attrs[i]->attlen;
|
||||
}
|
||||
|
|
|
@@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.43 2000/01/26 05:56:28 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.44 2000/04/12 17:15:14 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* This should be moved to a more appropriate place. It is here
|
||||
|
@@ -18,7 +18,7 @@
|
|||
*
|
||||
* These functions operate in a private GlobalMemoryContext, which means
|
||||
* that large object descriptors hang around until we destroy the context.
|
||||
* That happens in lo_commit(). It'd be possible to prolong the lifetime
|
||||
* That happens in lo_commit(). It'd be possible to prolong the lifetime
|
||||
* of the context so that LO FDs are good across transactions (for example,
|
||||
* we could release the context only if we see that no FDs remain open).
|
||||
* But we'd need additional state in order to do the right thing at the
|
||||
|
@@ -259,9 +259,9 @@ lo_tell(int fd)
|
|||
}
|
||||
|
||||
/*
|
||||
* We assume we do not need to switch contexts for inv_tell.
|
||||
* That is true for now, but is probably more than this module
|
||||
* ought to assume...
|
||||
* We assume we do not need to switch contexts for inv_tell. That is
|
||||
* true for now, but is probably more than this module ought to
|
||||
* assume...
|
||||
*/
|
||||
return inv_tell(cookies[fd]);
|
||||
}
|
||||
|
@@ -269,10 +269,11 @@ lo_tell(int fd)
|
|||
int
|
||||
lo_unlink(Oid lobjId)
|
||||
{
|
||||
|
||||
/*
|
||||
* inv_drop does not need a context switch, indeed it doesn't
|
||||
* touch any LO-specific data structures at all. (Again, that's
|
||||
* probably more than this module ought to be assuming.)
|
||||
* inv_drop does not need a context switch, indeed it doesn't touch
|
||||
* any LO-specific data structures at all. (Again, that's probably
|
||||
* more than this module ought to be assuming.)
|
||||
*
|
||||
* XXX there ought to be some code to clean up any open LOs that
|
||||
* reference the specified relation... as is, they remain "open".
|
||||
|
@@ -417,9 +418,9 @@ lo_export(Oid lobjId, text *filename)
|
|||
/*
|
||||
* open the file to be written to
|
||||
*
|
||||
* Note: we reduce backend's normal 077 umask to the slightly
|
||||
* friendlier 022. This code used to drop it all the way to 0,
|
||||
* but creating world-writable export files doesn't seem wise.
|
||||
* Note: we reduce backend's normal 077 umask to the slightly friendlier
|
||||
* 022. This code used to drop it all the way to 0, but creating
|
||||
* world-writable export files doesn't seem wise.
|
||||
*/
|
||||
nbytes = VARSIZE(filename) - VARHDRSZ + 1;
|
||||
if (nbytes > FNAME_BUFSIZE)
|
||||
|
@@ -470,8 +471,9 @@ lo_commit(bool isCommit)
|
|||
|
||||
currentContext = MemoryContextSwitchTo((MemoryContext) fscxt);
|
||||
|
||||
/* Clean out still-open index scans (not necessary if aborting)
|
||||
* and clear cookies array so that LO fds are no longer good.
|
||||
/*
|
||||
* Clean out still-open index scans (not necessary if aborting) and
|
||||
* clear cookies array so that LO fds are no longer good.
|
||||
*/
|
||||
for (i = 0; i < MAX_LOBJ_FDS; i++)
|
||||
{
|
||||
|
|
|
@@ -5,7 +5,7 @@
|
|||
* wherein you authenticate a user by seeing what IP address the system
|
||||
* says he comes from and possibly using ident).
|
||||
*
|
||||
* $Id: hba.c,v 1.50 2000/03/17 02:36:08 tgl Exp $
|
||||
* $Id: hba.c,v 1.51 2000/04/12 17:15:14 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -209,31 +209,32 @@ process_hba_record(FILE *file, hbaPort *port, bool *matches_p, bool *error_p)
|
|||
*/
|
||||
|
||||
if ((strcmp(db, port->database) != 0 && strcmp(db, "all") != 0 &&
|
||||
(strcmp(db, "sameuser") != 0 || strcmp(port->database, port->user) != 0)) ||
|
||||
port->raddr.sa.sa_family != AF_UNIX)
|
||||
(strcmp(db, "sameuser") != 0 || strcmp(port->database, port->user) != 0)) ||
|
||||
port->raddr.sa.sa_family != AF_UNIX)
|
||||
return;
|
||||
}
|
||||
else if (strcmp(buf, "host") == 0 || strcmp(buf, "hostssl") == 0)
|
||||
{
|
||||
struct in_addr file_ip_addr,
|
||||
mask;
|
||||
bool discard = 0; /* Discard this entry */
|
||||
bool discard = 0;/* Discard this entry */
|
||||
|
||||
#ifdef USE_SSL
|
||||
/* If SSL, then check that we are on SSL */
|
||||
if (strcmp(buf, "hostssl") == 0) {
|
||||
if (!port->ssl)
|
||||
discard = 1;
|
||||
|
||||
/* Placeholder to require specific SSL level, perhaps? */
|
||||
/* Or a client certificate */
|
||||
if (strcmp(buf, "hostssl") == 0)
|
||||
{
|
||||
if (!port->ssl)
|
||||
discard = 1;
|
||||
|
||||
/* Since we were on SSL, proceed as with normal 'host' mode */
|
||||
/* Placeholder to require specific SSL level, perhaps? */
|
||||
/* Or a client certificate */
|
||||
|
||||
/* Since we were on SSL, proceed as with normal 'host' mode */
|
||||
}
|
||||
#else
|
||||
/* If not SSL, we don't support this */
|
||||
if (strcmp(buf,"hostssl") == 0)
|
||||
goto syntax;
|
||||
if (strcmp(buf, "hostssl") == 0)
|
||||
goto syntax;
|
||||
#endif
|
||||
|
||||
/* Get the database. */
|
||||
|
@@ -286,7 +287,7 @@ process_hba_record(FILE *file, hbaPort *port, bool *matches_p, bool *error_p)
|
|||
* "out of sync" with the file.
|
||||
*/
|
||||
if (discard)
|
||||
return;
|
||||
return;
|
||||
|
||||
/*
|
||||
* If this record isn't for our database, or this is the wrong
|
||||
|
@@ -294,8 +295,8 @@ process_hba_record(FILE *file, hbaPort *port, bool *matches_p, bool *error_p)
|
|||
*/
|
||||
|
||||
if ((strcmp(db, port->database) != 0 && strcmp(db, "all") != 0 &&
|
||||
(strcmp(db, "sameuser") != 0 || strcmp(port->database, port->user) != 0)) ||
|
||||
port->raddr.sa.sa_family != AF_INET ||
|
||||
(strcmp(db, "sameuser") != 0 || strcmp(port->database, port->user) != 0)) ||
|
||||
port->raddr.sa.sa_family != AF_INET ||
|
||||
((file_ip_addr.s_addr ^ port->raddr.in.sin_addr.s_addr) & mask.s_addr) != 0x0000)
|
||||
return;
|
||||
}
|
||||
|
@@ -353,7 +354,7 @@ process_open_config_file(FILE *file, hbaPort *port, bool *hba_ok_p)
|
|||
/* If no matching entry was found, synthesize 'reject' entry. */
|
||||
|
||||
if (!found_entry)
|
||||
port->auth_method = uaReject;
|
||||
port->auth_method = uaReject;
|
||||
|
||||
*hba_ok_p = true;
|
||||
}
|
||||
|
|
|
@@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/libpq/Attic/portalbuf.c,v 1.23 2000/03/17 02:36:08 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/libpq/Attic/portalbuf.c,v 1.24 2000/04/12 17:15:14 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -88,7 +88,7 @@ portals_realloc(size_t size)
|
|||
portals = newp;
|
||||
else
|
||||
libpq_raise(&PortalError,
|
||||
vararg_format("Cannot alloc more memory in portals_realloc"));
|
||||
vararg_format("Cannot alloc more memory in portals_realloc"));
|
||||
|
||||
for (i = oldsize; i < (int) portals_array_size; i++)
|
||||
portals[i] = (PortalEntry *) NULL;
|
||||
|
@@ -461,7 +461,7 @@ pbuf_findGroup(PortalBuffer *portal,
|
|||
|
||||
if (group == NULL)
|
||||
libpq_raise(&PortalError,
|
||||
vararg_format("Group index %d out of bound.", group_index));
|
||||
vararg_format("Group index %d out of bound.", group_index));
|
||||
|
||||
return group;
|
||||
}
|
||||
|
@@ -484,7 +484,7 @@ pbuf_findFnumber(GroupBuffer *group,
|
|||
return i;
|
||||
|
||||
libpq_raise(&PortalError,
|
||||
vararg_format("Field-name %s does not exist.", field_name));
|
||||
vararg_format("Field-name %s does not exist.", field_name));
|
||||
|
||||
/* not reached, here to make compiler happy */
|
||||
return 0;
|
||||
|
@@ -501,7 +501,7 @@ pbuf_checkFnumber(GroupBuffer *group,
|
|||
{
|
||||
if (field_number < 0 || field_number >= group->no_fields)
|
||||
libpq_raise(&PortalError,
|
||||
vararg_format("Field number %d out of bound.", field_number));
|
||||
vararg_format("Field number %d out of bound.", field_number));
|
||||
}
|
||||
|
||||
/* --------------------------------
|
||||
|
|
|
@@ -29,7 +29,7 @@
|
|||
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Id: pqcomm.c,v 1.87 2000/01/26 05:56:29 momjian Exp $
|
||||
* $Id: pqcomm.c,v 1.88 2000/04/12 17:15:14 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -75,7 +75,7 @@
|
|||
#include "postgres.h"
|
||||
|
||||
#include "libpq/libpq.h"
|
||||
#include "utils/trace.h" /* needed for HAVE_FCNTL_SETLK */
|
||||
#include "utils/trace.h" /* needed for HAVE_FCNTL_SETLK */
|
||||
#include "miscadmin.h"
|
||||
|
||||
|
||||
|
@@ -270,7 +270,7 @@ StreamServerPort(char *hostName, unsigned short portName, int *fdP)
|
|||
{
|
||||
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
|
||||
"FATAL: StreamServerPort: bind() failed: %s\n"
|
||||
"\tIs another postmaster already running on that port?\n",
|
||||
"\tIs another postmaster already running on that port?\n",
|
||||
strerror(errno));
|
||||
if (family == AF_UNIX)
|
||||
snprintf(PQerrormsg + strlen(PQerrormsg),
|
||||
|
@@ -438,15 +438,15 @@ pq_recvbuf(void)
|
|||
for (;;)
|
||||
{
|
||||
int r;
|
||||
|
||||
|
||||
#ifdef USE_SSL
|
||||
if (MyProcPort->ssl)
|
||||
r = SSL_read(MyProcPort->ssl, PqRecvBuffer + PqRecvLength,
|
||||
PQ_BUFFER_SIZE - PqRecvLength);
|
||||
r = SSL_read(MyProcPort->ssl, PqRecvBuffer + PqRecvLength,
|
||||
PQ_BUFFER_SIZE - PqRecvLength);
|
||||
else
|
||||
#endif
|
||||
r = recv(MyProcPort->sock, PqRecvBuffer + PqRecvLength,
|
||||
PQ_BUFFER_SIZE - PqRecvLength, 0);
|
||||
r = recv(MyProcPort->sock, PqRecvBuffer + PqRecvLength,
|
||||
PQ_BUFFER_SIZE - PqRecvLength, 0);
|
||||
|
||||
if (r < 0)
|
||||
{
|
||||
|
@@ -561,9 +561,7 @@ pq_getstring(StringInfo s)
|
|||
|
||||
/* Read until we get the terminating '\0' */
|
||||
while ((c = pq_getbyte()) != EOF && c != '\0')
|
||||
{
|
||||
appendStringInfoChar(s, c);
|
||||
}
|
||||
|
||||
if (c == EOF)
|
||||
return EOF;
|
||||
|
@@ -614,12 +612,13 @@ pq_flush(void)
|
|||
while (bufptr < bufend)
|
||||
{
|
||||
int r;
|
||||
|
||||
#ifdef USE_SSL
|
||||
if (MyProcPort->ssl)
|
||||
r = SSL_write(MyProcPort->ssl, bufptr, bufend - bufptr);
|
||||
r = SSL_write(MyProcPort->ssl, bufptr, bufend - bufptr);
|
||||
else
|
||||
#endif
|
||||
r = send(MyProcPort->sock, bufptr, bufend - bufptr, 0);
|
||||
r = send(MyProcPort->sock, bufptr, bufend - bufptr, 0);
|
||||
|
||||
if (r <= 0)
|
||||
{
|
||||
|
|
|
@@ -16,7 +16,7 @@
|
|||
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Id: pqformat.c,v 1.12 2000/01/26 05:56:29 momjian Exp $
|
||||
* $Id: pqformat.c,v 1.13 2000/04/12 17:15:14 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -156,6 +156,7 @@ void
|
|||
pq_sendstring(StringInfo buf, const char *str)
|
||||
{
|
||||
int slen = strlen(str);
|
||||
|
||||
#ifdef MULTIBYTE
|
||||
char *p;
|
||||
|
||||
|
@@ -237,13 +238,15 @@ int
|
|||
pq_puttextmessage(char msgtype, const char *str)
|
||||
{
|
||||
int slen = strlen(str);
|
||||
|
||||
#ifdef MULTIBYTE
|
||||
char *p;
|
||||
|
||||
p = (char *) pg_server_to_client((unsigned char *) str, slen);
|
||||
if (p != str) /* actual conversion has been done? */
|
||||
{
|
||||
int result = pq_putmessage(msgtype, p, strlen(p) + 1);
|
||||
int result = pq_putmessage(msgtype, p, strlen(p) + 1);
|
||||
|
||||
pfree(p);
|
||||
return result;
|
||||
}
|
||||
|
@@ -308,8 +311,10 @@ int
|
|||
pq_getstr(StringInfo s)
|
||||
{
|
||||
int result;
|
||||
|
||||
#ifdef MULTIBYTE
|
||||
char *p;
|
||||
|
||||
#endif
|
||||
|
||||
result = pq_getstring(s);
|
||||
|
|
|
@@ -9,7 +9,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/libpq/Attic/pqpacket.c,v 1.25 2000/03/19 22:10:07 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/libpq/Attic/pqpacket.c,v 1.26 2000/04/12 17:15:14 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -54,17 +54,17 @@ int
|
|||
PacketReceiveFragment(Port *port)
|
||||
{
|
||||
int got;
|
||||
Packet *pkt = &port->pktInfo;
|
||||
Packet *pkt = &port->pktInfo;
|
||||
|
||||
#ifdef USE_SSL
|
||||
if (port->ssl)
|
||||
got = SSL_read(port->ssl, pkt->ptr, pkt->nrtodo);
|
||||
if (port->ssl)
|
||||
got = SSL_read(port->ssl, pkt->ptr, pkt->nrtodo);
|
||||
else
|
||||
#endif
|
||||
got = read(port->sock, pkt->ptr, pkt->nrtodo);
|
||||
got = read(port->sock, pkt->ptr, pkt->nrtodo);
|
||||
if (got > 0)
|
||||
{
|
||||
pkt->nrtodo -= got;
|
||||
pkt->nrtodo -= got;
|
||||
pkt->ptr += got;
|
||||
|
||||
/* See if we have got what we need for the packet length. */
|
||||
|
@@ -143,14 +143,14 @@ int
|
|||
PacketSendFragment(Port *port)
|
||||
{
|
||||
int done;
|
||||
Packet *pkt = &port->pktInfo;
|
||||
Packet *pkt = &port->pktInfo;
|
||||
|
||||
#ifdef USE_SSL
|
||||
if (port->ssl)
|
||||
done = SSL_write(port->ssl, pkt->ptr, pkt->nrtodo);
|
||||
if (port->ssl)
|
||||
done = SSL_write(port->ssl, pkt->ptr, pkt->nrtodo);
|
||||
else
|
||||
#endif
|
||||
done = write(port->sock, pkt->ptr, pkt->nrtodo);
|
||||
done = write(port->sock, pkt->ptr, pkt->nrtodo);
|
||||
|
||||
if (done > 0)
|
||||
{
|
||||
|
|
|
@@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.112 2000/04/08 00:21:15 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.113 2000/04/12 17:15:16 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -143,7 +143,7 @@ _copyResult(Result *from)
|
|||
*/
|
||||
if (from->plan.subPlan != NIL)
|
||||
newnode->plan.subPlan = nconc(newnode->plan.subPlan,
|
||||
pull_subplans(newnode->resconstantqual));
|
||||
pull_subplans(newnode->resconstantqual));
|
||||
|
||||
return newnode;
|
||||
}
|
||||
|
@@ -259,25 +259,25 @@ _copyIndexScan(IndexScan *from)
|
|||
if (from->scan.plan.subPlan != NIL)
|
||||
{
|
||||
newnode->scan.plan.subPlan = nconc(newnode->scan.plan.subPlan,
|
||||
pull_subplans((Node *) newnode->indxqual));
|
||||
pull_subplans((Node *) newnode->indxqual));
|
||||
newnode->scan.plan.subPlan = nconc(newnode->scan.plan.subPlan,
|
||||
pull_subplans((Node *) newnode->indxqualorig));
|
||||
pull_subplans((Node *) newnode->indxqualorig));
|
||||
}
|
||||
|
||||
return newnode;
|
||||
}
|
||||
|
||||
/* ----------------
|
||||
* _copyTidScan
|
||||
* _copyTidScan
|
||||
* ----------------
|
||||
*/
|
||||
static TidScan *
|
||||
_copyTidScan(TidScan *from)
|
||||
{
|
||||
TidScan *newnode = makeNode(TidScan);
|
||||
TidScan *newnode = makeNode(TidScan);
|
||||
|
||||
/* ----------------
|
||||
* copy node superclass fields
|
||||
* copy node superclass fields
|
||||
* ----------------
|
||||
*/
|
||||
CopyPlanFields((Plan *) from, (Plan *) newnode);
|
||||
|
@@ -292,7 +292,7 @@ _copyTidScan(TidScan *from)
|
|||
return newnode;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* ----------------
|
||||
* CopyJoinFields
|
||||
*
|
||||
|
@@ -375,7 +375,7 @@ _copyMergeJoin(MergeJoin *from)
|
|||
*/
|
||||
if (from->join.subPlan != NIL)
|
||||
newnode->join.subPlan = nconc(newnode->join.subPlan,
|
||||
pull_subplans((Node *) newnode->mergeclauses));
|
||||
pull_subplans((Node *) newnode->mergeclauses));
|
||||
|
||||
return newnode;
|
||||
}
|
||||
|
@@ -408,7 +408,7 @@ _copyHashJoin(HashJoin *from)
|
|||
*/
|
||||
if (from->join.subPlan != NIL)
|
||||
newnode->join.subPlan = nconc(newnode->join.subPlan,
|
||||
pull_subplans((Node *) newnode->hashclauses));
|
||||
pull_subplans((Node *) newnode->hashclauses));
|
||||
|
||||
return newnode;
|
||||
}
|
||||
|
@@ -871,7 +871,7 @@ _copyAggref(Aggref *from)
|
|||
newnode->usenulls = from->usenulls;
|
||||
newnode->aggstar = from->aggstar;
|
||||
newnode->aggdistinct = from->aggdistinct;
|
||||
newnode->aggno = from->aggno; /* probably not needed */
|
||||
newnode->aggno = from->aggno; /* probably not needed */
|
||||
|
||||
return newnode;
|
||||
}
|
||||
|
@@ -905,7 +905,7 @@ _copySubLink(SubLink *from)
|
|||
static RelabelType *
|
||||
_copyRelabelType(RelabelType *from)
|
||||
{
|
||||
RelabelType *newnode = makeNode(RelabelType);
|
||||
RelabelType *newnode = makeNode(RelabelType);
|
||||
|
||||
/* ----------------
|
||||
* copy remainder of node
|
||||
|
@@ -1108,6 +1108,7 @@ _copyIndexOptInfo(IndexOptInfo *from)
|
|||
static void
|
||||
CopyPathFields(Path *from, Path *newnode)
|
||||
{
|
||||
|
||||
/*
|
||||
* Modify the next line, since it causes the copying to cycle (i.e.
|
||||
* the parent points right back here! -- JMH, 7/7/92. Old version:
|
||||
|
@@ -1166,13 +1167,13 @@ _copyIndexPath(IndexPath *from)
|
|||
}
|
||||
|
||||
/* ----------------
|
||||
* _copyTidPath
|
||||
* _copyTidPath
|
||||
* ----------------
|
||||
*/
|
||||
static TidPath *
|
||||
_copyTidPath(TidPath *from)
|
||||
{
|
||||
TidPath *newnode = makeNode(TidPath);
|
||||
TidPath *newnode = makeNode(TidPath);
|
||||
|
||||
/* ----------------
|
||||
* copy the node superclass fields
|
||||
|
@@ -1189,6 +1190,7 @@ _copyTidPath(TidPath *from)
|
|||
|
||||
return newnode;
|
||||
}
|
||||
|
||||
/* ----------------
|
||||
* CopyJoinPathFields
|
||||
*
|
||||
|
@@ -1282,7 +1284,7 @@ _copyHashPath(HashPath *from)
|
|||
static PathKeyItem *
|
||||
_copyPathKeyItem(PathKeyItem *from)
|
||||
{
|
||||
PathKeyItem *newnode = makeNode(PathKeyItem);
|
||||
PathKeyItem *newnode = makeNode(PathKeyItem);
|
||||
|
||||
/* ----------------
|
||||
* copy remainder of node
|
||||
|
@@ -1497,8 +1499,8 @@ _copyQuery(Query *from)
|
|||
|
||||
/*
|
||||
* We do not copy the planner internal fields: base_rel_list,
|
||||
* join_rel_list, equi_key_list, query_pathkeys.
|
||||
* Not entirely clear if this is right?
|
||||
* join_rel_list, equi_key_list, query_pathkeys. Not entirely clear if
|
||||
* this is right?
|
||||
*/
|
||||
|
||||
return newnode;
|
||||
|
@@ -1507,7 +1509,7 @@ _copyQuery(Query *from)
|
|||
static ClosePortalStmt *
|
||||
_copyClosePortalStmt(ClosePortalStmt *from)
|
||||
{
|
||||
ClosePortalStmt *newnode = makeNode(ClosePortalStmt);
|
||||
ClosePortalStmt *newnode = makeNode(ClosePortalStmt);
|
||||
|
||||
if (from->portalname)
|
||||
newnode->portalname = pstrdup(from->portalname);
|
||||
|
@@ -1518,7 +1520,7 @@ _copyClosePortalStmt(ClosePortalStmt *from)
|
|||
static TruncateStmt *
|
||||
_copyTruncateStmt(TruncateStmt *from)
|
||||
{
|
||||
TruncateStmt *newnode = makeNode(TruncateStmt);
|
||||
TruncateStmt *newnode = makeNode(TruncateStmt);
|
||||
|
||||
newnode->relName = pstrdup(from->relName);
|
||||
|
||||
|
@@ -1528,7 +1530,7 @@ _copyTruncateStmt(TruncateStmt *from)
|
|||
static NotifyStmt *
|
||||
_copyNotifyStmt(NotifyStmt *from)
|
||||
{
|
||||
NotifyStmt *newnode = makeNode(NotifyStmt);
|
||||
NotifyStmt *newnode = makeNode(NotifyStmt);
|
||||
|
||||
if (from->relname)
|
||||
newnode->relname = pstrdup(from->relname);
|
||||
|
@@ -1539,7 +1541,7 @@ _copyNotifyStmt(NotifyStmt *from)
|
|||
static ListenStmt *
|
||||
_copyListenStmt(ListenStmt *from)
|
||||
{
|
||||
ListenStmt *newnode = makeNode(ListenStmt);
|
||||
ListenStmt *newnode = makeNode(ListenStmt);
|
||||
|
||||
if (from->relname)
|
||||
newnode->relname = pstrdup(from->relname);
|
||||
|
@@ -1550,7 +1552,7 @@ _copyListenStmt(ListenStmt *from)
|
|||
static UnlistenStmt *
|
||||
_copyUnlistenStmt(UnlistenStmt *from)
|
||||
{
|
||||
UnlistenStmt *newnode = makeNode(UnlistenStmt);
|
||||
UnlistenStmt *newnode = makeNode(UnlistenStmt);
|
||||
|
||||
if (from->relname)
|
||||
newnode->relname = pstrdup(from->relname);
|
||||
|
@@ -1561,7 +1563,7 @@ _copyUnlistenStmt(UnlistenStmt *from)
|
|||
static TransactionStmt *
|
||||
_copyTransactionStmt(TransactionStmt *from)
|
||||
{
|
||||
TransactionStmt *newnode = makeNode(TransactionStmt);
|
||||
TransactionStmt *newnode = makeNode(TransactionStmt);
|
||||
|
||||
newnode->command = from->command;
|
||||
|
||||
|
@@ -1571,7 +1573,7 @@ _copyTransactionStmt(TransactionStmt *from)
|
|||
static LoadStmt *
|
||||
_copyLoadStmt(LoadStmt *from)
|
||||
{
|
||||
LoadStmt *newnode = makeNode(LoadStmt);
|
||||
LoadStmt *newnode = makeNode(LoadStmt);
|
||||
|
||||
if (from->filename)
|
||||
newnode->filename = pstrdup(from->filename);
|
||||
|
@@ -1582,7 +1584,7 @@ _copyLoadStmt(LoadStmt *from)
|
|||
static VariableSetStmt *
|
||||
_copyVariableSetStmt(VariableSetStmt *from)
|
||||
{
|
||||
VariableSetStmt *newnode = makeNode(VariableSetStmt);
|
||||
VariableSetStmt *newnode = makeNode(VariableSetStmt);
|
||||
|
||||
if (from->name)
|
||||
newnode->name = pstrdup(from->name);
|
||||
|
@@ -1595,7 +1597,7 @@ _copyVariableSetStmt(VariableSetStmt *from)
|
|||
static VariableResetStmt *
|
||||
_copyVariableResetStmt(VariableResetStmt *from)
|
||||
{
|
||||
VariableResetStmt *newnode = makeNode(VariableResetStmt);
|
||||
VariableResetStmt *newnode = makeNode(VariableResetStmt);
|
||||
|
||||
if (from->name)
|
||||
newnode->name = pstrdup(from->name);
|
||||
|
@@ -1606,7 +1608,7 @@ _copyVariableResetStmt(VariableResetStmt *from)
|
|||
static LockStmt *
|
||||
_copyLockStmt(LockStmt *from)
|
||||
{
|
||||
LockStmt *newnode = makeNode(LockStmt);
|
||||
LockStmt *newnode = makeNode(LockStmt);
|
||||
|
||||
if (from->relname)
|
||||
newnode->relname = pstrdup(from->relname);
|
||||
|
|
|
@@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.65 2000/03/22 22:08:32 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.66 2000/04/12 17:15:16 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -81,9 +81,11 @@ _equalFjoin(Fjoin *a, Fjoin *b)
|
|||
static bool
|
||||
_equalExpr(Expr *a, Expr *b)
|
||||
{
|
||||
/* We do not examine typeOid, since the optimizer often doesn't
|
||||
* bother to set it in created nodes, and it is logically a
|
||||
* derivative of the oper field anyway.
|
||||
|
||||
/*
|
||||
* We do not examine typeOid, since the optimizer often doesn't bother
|
||||
* to set it in created nodes, and it is logically a derivative of the
|
||||
* oper field anyway.
|
||||
*/
|
||||
if (a->opType != b->opType)
|
||||
return false;
|
||||
|
@@ -134,7 +136,9 @@ _equalOper(Oper *a, Oper *b)
|
|||
return false;
|
||||
if (a->opresulttype != b->opresulttype)
|
||||
return false;
|
||||
/* We do not examine opid, opsize, or op_fcache, since these are
|
||||
|
||||
/*
|
||||
* We do not examine opid, opsize, or op_fcache, since these are
|
||||
* logically derived from opno, and they may not be set yet depending
|
||||
* on how far along the node is in the parse/plan pipeline.
|
||||
*
|
||||
|
@@ -156,10 +160,11 @@ _equalConst(Const *a, Const *b)
|
|||
if (a->constbyval != b->constbyval)
|
||||
return false;
|
||||
/* XXX What about constisset and constiscast? */
|
||||
|
||||
/*
|
||||
* We treat all NULL constants of the same type as equal.
|
||||
* Someday this might need to change? But datumIsEqual
|
||||
* doesn't work on nulls, so...
|
||||
* We treat all NULL constants of the same type as equal. Someday this
|
||||
* might need to change? But datumIsEqual doesn't work on nulls,
|
||||
* so...
|
||||
*/
|
||||
if (a->constisnull)
|
||||
return true;
|
||||
|
@@ -320,7 +325,9 @@ _equalArrayRef(ArrayRef *a, ArrayRef *b)
|
|||
static bool
|
||||
_equalRelOptInfo(RelOptInfo *a, RelOptInfo *b)
|
||||
{
|
||||
/* We treat RelOptInfos as equal if they refer to the same base rels
|
||||
|
||||
/*
|
||||
* We treat RelOptInfos as equal if they refer to the same base rels
|
||||
* joined in the same order. Is this sufficient?
|
||||
*/
|
||||
return equali(a->relids, b->relids);
|
||||
|
@@ -329,8 +336,10 @@ _equalRelOptInfo(RelOptInfo *a, RelOptInfo *b)
|
|||
static bool
|
||||
_equalIndexOptInfo(IndexOptInfo *a, IndexOptInfo *b)
|
||||
{
|
||||
/* We treat IndexOptInfos as equal if they refer to the same index.
|
||||
* Is this sufficient?
|
||||
|
||||
/*
|
||||
* We treat IndexOptInfos as equal if they refer to the same index. Is
|
||||
* this sufficient?
|
||||
*/
|
||||
if (a->indexoid != b->indexoid)
|
||||
return false;
|
||||
|
@@ -354,7 +363,9 @@ _equalPath(Path *a, Path *b)
|
|||
return false;
|
||||
if (!equal(a->parent, b->parent))
|
||||
return false;
|
||||
/* do not check path costs, since they may not be set yet, and being
|
||||
|
||||
/*
|
||||
* do not check path costs, since they may not be set yet, and being
|
||||
* float values there are roundoff error issues anyway...
|
||||
*/
|
||||
if (!equal(a->pathkeys, b->pathkeys))
|
||||
|
@@ -375,8 +386,10 @@ _equalIndexPath(IndexPath *a, IndexPath *b)
|
|||
return false;
|
||||
if (!equali(a->joinrelids, b->joinrelids))
|
||||
return false;
|
||||
/* Skip 'rows' because of possibility of floating-point roundoff error.
|
||||
* It should be derivable from the other fields anyway.
|
||||
|
||||
/*
|
||||
* Skip 'rows' because of possibility of floating-point roundoff
|
||||
* error. It should be derivable from the other fields anyway.
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
|
@@ -442,12 +455,13 @@ _equalHashPath(HashPath *a, HashPath *b)
|
|||
/* XXX This equality function is a quick hack, should be
|
||||
* fixed to compare all fields.
|
||||
*
|
||||
* XXX Why is this even here? We don't have equal() funcs for
|
||||
* any other kinds of Plan nodes... likely this is dead code...
|
||||
* XXX Why is this even here? We don't have equal() funcs for
|
||||
* any other kinds of Plan nodes... likely this is dead code...
|
||||
*/
|
||||
static bool
|
||||
_equalIndexScan(IndexScan *a, IndexScan *b)
|
||||
{
|
||||
|
||||
/*
|
||||
* if(a->scan.plan.cost != b->scan.plan.cost) return(false);
|
||||
*/
|
||||
|
@@ -642,9 +656,9 @@ _equalQuery(Query *a, Query *b)
|
|||
|
||||
/*
|
||||
* We do not check the internal-to-the-planner fields: base_rel_list,
|
||||
* join_rel_list, equi_key_list, query_pathkeys.
|
||||
* They might not be set yet, and in any case they should be derivable
|
||||
* from the other fields.
|
||||
* join_rel_list, equi_key_list, query_pathkeys. They might not be set
|
||||
* yet, and in any case they should be derivable from the other
|
||||
* fields.
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
|
@@ -882,7 +896,8 @@ equal(void *a, void *b)
|
|||
List *lb = (List *) b;
|
||||
List *l;
|
||||
|
||||
/* Try to reject by length check before we grovel through
|
||||
/*
|
||||
* Try to reject by length check before we grovel through
|
||||
* all the elements...
|
||||
*/
|
||||
if (length(la) != length(lb))
|
||||
|
|
|
@@ -8,7 +8,7 @@
|
|||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/nodes/Attic/freefuncs.c,v 1.39 2000/03/14 23:06:28 thomas Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/nodes/Attic/freefuncs.c,v 1.40 2000/04/12 17:15:16 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@@ -746,7 +746,9 @@ _freeRelOptInfo(RelOptInfo *node)
|
|||
|
||||
freeObject(node->targetlist);
|
||||
freeObject(node->pathlist);
|
||||
/* XXX is this right? cheapest-path fields will typically be pointers
|
||||
|
||||
/*
|
||||
* XXX is this right? cheapest-path fields will typically be pointers
|
||||
* into pathlist, not separate structs...
|
||||
*/
|
||||
freeObject(node->cheapest_startup_path);
|
||||
|
@@ -870,7 +872,9 @@ FreeJoinPathFields(JoinPath *node)
|
|||
{
|
||||
freeObject(node->outerjoinpath);
|
||||
freeObject(node->innerjoinpath);
|
||||
/* XXX probably wrong, since ordinarily a JoinPath would share its
|
||||
|
||||
/*
|
||||
* XXX probably wrong, since ordinarily a JoinPath would share its
|
||||
* restrictinfo list with other paths made for the same join?
|
||||
*/
|
||||
freeObject(node->joinrestrictinfo);
|
||||
|
@@ -970,7 +974,9 @@ _freeRestrictInfo(RestrictInfo *node)
|
|||
* ----------------
|
||||
*/
|
||||
freeObject(node->clause);
|
||||
/* this is certainly wrong? IndexOptInfos don't belong to
|
||||
|
||||
/*
|
||||
* this is certainly wrong? IndexOptInfos don't belong to
|
||||
* RestrictInfo...
|
||||
*/
|
||||
freeObject(node->subclauseindices);
|
||||
|
@@ -1131,8 +1137,8 @@ _freeValue(Value *node)
|
|||
{
|
||||
switch (node->type)
|
||||
{
|
||||
case T_Float:
|
||||
case T_String:
|
||||
case T_Float:
|
||||
case T_String:
|
||||
pfree(node->val.str);
|
||||
break;
|
||||
default:
|
||||
|
|
Some files were not shown because too many files have changed in this diff.