Remove pg_upgrade support for upgrading from pre-9.2 servers.

Per discussion, we'll limit support for old servers to those branches
that can still be built easily on modern platforms, which as of now
is 9.2 and up.

Discussion: https://postgr.es/m/2923349.1634942313@sss.pgh.pa.us
Tom Lane 2021-12-14 19:17:55 -05:00
parent 30e7c175b8
commit e469f0aaf3
9 changed files with 43 additions and 227 deletions

doc/src/sgml/ref/pgupgrade.sgml

@@ -68,7 +68,7 @@ PostgreSQL documentation
</para>
<para>
pg_upgrade supports upgrades from 8.4.X and later to the current
pg_upgrade supports upgrades from 9.2.X and later to the current
major release of <productname>PostgreSQL</productname>, including snapshot and beta releases.
</para>
</refsect1>
@@ -800,21 +800,6 @@ psql --username=postgres --file=script.sql postgres
(<type>regclass</type>, <type>regrole</type>, and <type>regtype</type> can be upgraded.)
</para>
<para>
If you are upgrading a pre-<productname>PostgreSQL</productname> 9.2 cluster
that uses a configuration-file-only directory, you must pass the
real data directory location to <application>pg_upgrade</application>, and
pass the configuration directory location to the server, e.g.,
<literal>-d /real-data-directory -o '-D /configuration-directory'</literal>.
</para>
<para>
If using a pre-9.1 old server that is using a non-default Unix-domain
socket directory or a default that differs from the default of the
new cluster, set <envar>PGHOST</envar> to point to the old server's socket
location. (This is not relevant on Windows.)
</para>
<para>
If you want to use link mode and you do not want your old cluster
to be modified when the new cluster is started, consider using the clone mode.

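To put the revised support floor in practical terms, an upgrade from a 9.2-or-later cluster is typically driven as below; this is a minimal sketch, and the installation paths, directory layout, and choice of --link versus --clone are hypothetical examples rather than recommended values.

    # Dry run: verify the old (9.2+) and new clusters are compatible
    # without modifying anything.  Paths are hypothetical examples.
    pg_upgrade --check \
        --old-bindir  /usr/lib/postgresql/9.2/bin \
        --new-bindir  /usr/lib/postgresql/15/bin \
        --old-datadir /var/lib/postgresql/9.2/main \
        --new-datadir /var/lib/postgresql/15/main

    # Actual upgrade.  --link avoids copying data files; --clone (where the
    # filesystem supports it) also leaves the old cluster usable after the
    # new one has been started, per the paragraph above.
    pg_upgrade \
        --old-bindir  /usr/lib/postgresql/9.2/bin \
        --new-bindir  /usr/lib/postgresql/15/bin \
        --old-datadir /var/lib/postgresql/9.2/main \
        --new-datadir /var/lib/postgresql/15/main \
        --clone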
src/bin/pg_upgrade/check.c

@@ -159,10 +159,6 @@ check_and_dump_old_cluster(bool live_check)
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 903)
old_9_3_check_for_line_data_type_usage(&old_cluster);
/* Pre-PG 9.0 had no large object permissions */
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
new_9_0_populate_pg_largeobject_metadata(&old_cluster, true);
/*
* While not a check option, we do this now because this is the only time
* the old server is running.
@@ -233,10 +229,6 @@ issue_warnings_and_set_wal_level(void)
*/
start_postmaster(&new_cluster, true);
/* Create dummy large object permissions for old < PG 9.0? */
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
new_9_0_populate_pg_largeobject_metadata(&new_cluster, false);
/* Reindex hash indexes for old < 10.0 */
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 906)
old_9_6_invalidate_hash_indexes(&new_cluster, false);
@@ -295,8 +287,8 @@ check_cluster_versions(void)
* upgrades
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) < 804)
pg_fatal("This utility can only upgrade from PostgreSQL version 8.4 and later.\n");
if (GET_MAJOR_VERSION(old_cluster.major_version) < 902)
pg_fatal("This utility can only upgrade from PostgreSQL version 9.2 and later.\n");
/* Only current PG version is supported as a target */
if (GET_MAJOR_VERSION(new_cluster.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
@@ -331,12 +323,6 @@ check_cluster_compatibility(bool live_check)
get_control_data(&new_cluster, false);
check_control_data(&old_cluster.controldata, &new_cluster.controldata);
/* We read the real port number for PG >= 9.1 */
if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) <= 900 &&
old_cluster.port == DEF_PGUPORT)
pg_fatal("When checking a pre-PG 9.1 live old server, "
"you must specify the old server's port number.\n");
if (live_check && old_cluster.port == new_cluster.port)
pg_fatal("When checking a live server, "
"the old and new port numbers must be different.\n");
@@ -479,11 +465,6 @@ check_databases_are_compatible(void)
* they do, it would cause an error while restoring global objects.
* This allows the failure to be detected at check time, rather than
* during schema restore.
*
* Note, v8.4 has no tablespace_suffix, which is fine so long as the
* version being upgraded *to* has a suffix, since it's not allowed
* to pg_upgrade from a version to the same version if tablespaces are
* in use.
*/
static void
check_for_new_tablespace_dir(ClusterInfo *new_cluster)
@@ -597,11 +578,6 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
int dbnum;
fprintf(script, "\n");
/* remove PG_VERSION? */
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
fprintf(script, RM_CMD " %s%cPG_VERSION\n",
fix_path_separator(os_info.old_tablespaces[tblnum]),
PATH_SEPARATOR);
for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
fprintf(script, RMDIR_CMD " %c%s%c%u%c\n", PATH_QUOTE,

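As a quick illustration of the tightened check in check_cluster_versions(), pointing the new pg_upgrade at a pre-9.2 installation now fails immediately with the message added above; the paths are hypothetical and the output shown is approximate.

    # Hypothetical paths; a 9.0 old cluster is now rejected up front.
    $ pg_upgrade --check \
          -b /usr/lib/postgresql/9.0/bin -B /usr/lib/postgresql/15/bin \
          -d /var/lib/postgresql/9.0/main -D /var/lib/postgresql/15/main
    This utility can only upgrade from PostgreSQL version 9.2 and later.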
src/bin/pg_upgrade/info.c

@@ -118,15 +118,9 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
* Verify that rels of same OID have same name. The namespace name
* should always match, but the relname might not match for TOAST
* tables (and, therefore, their indexes).
*
* TOAST table names initially match the heap pg_class oid, but
* pre-9.0 they can change during certain commands such as CLUSTER, so
* don't insist on a match if old cluster is < 9.0.
*/
if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
(strcmp(old_rel->relname, new_rel->relname) != 0 &&
(GET_MAJOR_VERSION(old_cluster.major_version) >= 900 ||
strcmp(old_rel->nspname, "pg_toast") != 0)))
strcmp(old_rel->relname, new_rel->relname) != 0)
{
pg_log(PG_WARNING, "Relation names for OID %u in database \"%s\" do not match: "
"old name \"%s.%s\", new name \"%s.%s\"\n",
@@ -352,16 +346,13 @@ get_db_infos(ClusterInfo *cluster)
snprintf(query, sizeof(query),
"SELECT d.oid, d.datname, d.encoding, d.datcollate, d.datctype, "
"%s AS spclocation "
"pg_catalog.pg_tablespace_location(t.oid) AS spclocation "
"FROM pg_catalog.pg_database d "
" LEFT OUTER JOIN pg_catalog.pg_tablespace t "
" ON d.dattablespace = t.oid "
"WHERE d.datallowconn = true "
/* we don't preserve pg_database.oid so we sort by name */
"ORDER BY 2",
/* 9.2 removed the spclocation column */
(GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
"t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid)");
"ORDER BY 2");
res = executeQueryOrDie(conn, "%s", query);
@@ -492,7 +483,8 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
*/
snprintf(query + strlen(query), sizeof(query) - strlen(query),
"SELECT all_rels.*, n.nspname, c.relname, "
" c.relfilenode, c.reltablespace, %s "
" c.relfilenode, c.reltablespace, "
" pg_catalog.pg_tablespace_location(t.oid) AS spclocation "
"FROM (SELECT * FROM regular_heap "
" UNION ALL "
" SELECT * FROM toast_heap "
@@ -504,11 +496,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
" ON c.relnamespace = n.oid "
" LEFT OUTER JOIN pg_catalog.pg_tablespace t "
" ON c.reltablespace = t.oid "
"ORDER BY 1;",
/* 9.2 removed the pg_tablespace.spclocation column */
(GET_MAJOR_VERSION(cluster->major_version) >= 902) ?
"pg_catalog.pg_tablespace_location(t.oid) AS spclocation" :
"t.spclocation");
"ORDER BY 1;");
res = executeQueryOrDie(conn, "%s", query);

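Both queries above now call pg_catalog.pg_tablespace_location() unconditionally; it is available from 9.2 onward and replaces the dropped pg_tablespace.spclocation column. A rough psql check of what it returns (the tablespace name and path are hypothetical examples):

    $ psql -XAtc "SELECT spcname, pg_catalog.pg_tablespace_location(oid) FROM pg_catalog.pg_tablespace"
    pg_default|
    pg_global|
    space1|/srv/pg_tblspc/space1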
src/bin/pg_upgrade/pg_upgrade.h

@@ -89,19 +89,7 @@ extern char *output_files[];
/*
* postmaster/postgres -b (binary_upgrade) flag added during PG 9.1
* development
*/
#define BINARY_UPGRADE_SERVER_FLAG_CAT_VER 201104251
/*
* Visibility map changed with this 9.2 commit,
* 8f9fe6edce358f7904e0db119416b4d1080a83aa; pick later catalog version.
*/
#define VISIBILITY_MAP_CRASHSAFE_CAT_VER 201107031
/*
* The format of visibility map is changed with this 9.6 commit,
* The format of visibility map was changed with this 9.6 commit.
*/
#define VISIBILITY_MAP_FROZEN_BIT_CAT_VER 201603011
@@ -447,8 +435,6 @@ bool check_for_data_types_usage(ClusterInfo *cluster,
bool check_for_data_type_usage(ClusterInfo *cluster,
const char *type_name,
const char *output_path);
void new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster,
bool check_mode);
void old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster);
void old_9_6_check_for_unknown_data_type_usage(ClusterInfo *cluster);
void old_9_6_invalidate_hash_indexes(ClusterInfo *cluster,

src/bin/pg_upgrade/relfilenode.c

@@ -137,17 +137,8 @@ static void
transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace)
{
int mapnum;
bool vm_crashsafe_match = true;
bool vm_must_add_frozenbit = false;
/*
* Do the old and new cluster disagree on the crash-safetiness of the vm
* files? If so, do not copy them.
*/
if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER &&
new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER)
vm_crashsafe_match = false;
/*
* Do we need to rewrite visibilitymap?
*/
@@ -167,8 +158,7 @@ transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace)
* Copy/link any fsm and vm files, if they exist
*/
transfer_relfile(&maps[mapnum], "_fsm", vm_must_add_frozenbit);
if (vm_crashsafe_match)
transfer_relfile(&maps[mapnum], "_vm", vm_must_add_frozenbit);
transfer_relfile(&maps[mapnum], "_vm", vm_must_add_frozenbit);
}
}
}

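For orientation, the _fsm and _vm suffixes above name separate fork files stored beside a relation's main data file; after this change the visibility-map fork is always transferred, and it is rewritten only when the 9.6 frozen-bit format change applies. A rough look at one relation's forks in the old cluster, with a hypothetical data directory, database OID, and relfilenode:

    # Hypothetical database OID 16384 and relfilenode 16402.
    OLD_PGDATA=/var/lib/postgresql/9.2/main
    ls -1 "$OLD_PGDATA"/base/16384/16402*
    # expected output, roughly:
    #   .../base/16384/16402        main (heap) fork
    #   .../base/16384/16402_fsm    free space map fork
    #   .../base/16384/16402_vm     visibility map fork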
src/bin/pg_upgrade/server.c

@@ -227,14 +227,7 @@ start_postmaster(ClusterInfo *cluster, bool report_and_exit_on_error)
#endif
/*
* Since PG 9.1, we have used -b to disable autovacuum. For earlier
* releases, setting autovacuum=off disables cleanup vacuum and analyze,
* but freeze vacuums can still happen, so we set
* autovacuum_freeze_max_age to its maximum.
* (autovacuum_multixact_freeze_max_age was introduced after 9.1, so there
* is no need to set that.) We assume all datfrozenxid and relfrozenxid
* values are less than a gap of 2000000000 from the current xid counter,
* so autovacuum will not touch them.
* Use -b to disable autovacuum.
*
* Turn off durability requirements to improve object creation speed, and
* we only modify the new cluster, so only use it there. If there is a
@@ -245,11 +238,8 @@ start_postmaster(ClusterInfo *cluster, bool report_and_exit_on_error)
* vacuumdb --freeze actually freezes the tuples.
*/
snprintf(cmd, sizeof(cmd),
"\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d%s%s %s%s\" start",
"\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d -b%s %s%s\" start",
cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
(cluster->controldata.cat_ver >=
BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? " -b" :
" -c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
(cluster == &new_cluster) ?
" -c synchronous_commit=off -c fsync=off -c full_page_writes=off -c vacuum_defer_cleanup_age=0" : "",
cluster->pgopts ? cluster->pgopts : "", socket_string);

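To make the simplification concrete, the command built by the new format string when starting the new cluster comes out roughly as below; the bindir, data directory, log file name, and port are hypothetical examples, and any user-supplied pgopts and socket options are appended within the same -o string.

    # Roughly what start_postmaster() now runs for the new cluster
    # (paths, log file name, and port are illustrative only):
    "/usr/lib/postgresql/15/bin/pg_ctl" -w -l "pg_upgrade_server.log" \
        -D "/var/lib/postgresql/15/main" \
        -o "-p 50432 -b -c synchronous_commit=off -c fsync=off -c full_page_writes=off -c vacuum_defer_cleanup_age=0" \
        start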
src/bin/pg_upgrade/tablespace.c

@@ -46,13 +46,10 @@ get_tablespace_paths(void)
char query[QUERY_ALLOC];
snprintf(query, sizeof(query),
"SELECT %s "
"SELECT pg_catalog.pg_tablespace_location(oid) AS spclocation "
"FROM pg_catalog.pg_tablespace "
"WHERE spcname != 'pg_default' AND "
" spcname != 'pg_global'",
/* 9.2 removed the spclocation column */
(GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ?
"spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation");
" spcname != 'pg_global'");
res = executeQueryOrDie(conn, "%s", query);
@@ -105,15 +102,10 @@ get_tablespace_paths(void)
static void
set_tablespace_directory_suffix(ClusterInfo *cluster)
{
if (GET_MAJOR_VERSION(cluster->major_version) <= 804)
cluster->tablespace_suffix = pg_strdup("");
else
{
/* This cluster has a version-specific subdirectory */
/* This cluster has a version-specific subdirectory */
/* The leading slash is needed to start a new directory. */
cluster->tablespace_suffix = psprintf("/PG_%s_%d",
cluster->major_version_str,
cluster->controldata.cat_ver);
}
/* The leading slash is needed to start a new directory. */
cluster->tablespace_suffix = psprintf("/PG_%s_%d",
cluster->major_version_str,
cluster->controldata.cat_ver);
}

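Concretely, the suffix built above names the per-major-version subdirectory that every release pg_upgrade still supports creates inside each user tablespace, which is why the pre-9.0 "no suffix" branch can go away. The directory and catalog version numbers below are hypothetical examples:

    # Listing a user tablespace directory shared by two clusters; entries
    # follow PG_<major>_<catalog version> (values shown are hypothetical).
    $ ls /srv/pg_tblspc/space1
    PG_9.2_201204301  PG_15_202110251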
src/bin/pg_upgrade/test.sh

@@ -202,9 +202,6 @@ if "$MAKE" -C "$oldsrc" installcheck-parallel; then
# update references to old source tree's regress.so etc
fix_sql=""
case $oldpgversion in
804??)
fix_sql="UPDATE pg_proc SET probin = replace(probin::text, '$oldsrc', '$newsrc')::bytea WHERE probin LIKE '$oldsrc%';"
;;
*)
fix_sql="UPDATE pg_proc SET probin = replace(probin, '$oldsrc', '$newsrc') WHERE probin LIKE '$oldsrc%';"
;;

src/bin/pg_upgrade/version.c

@@ -13,88 +13,6 @@
#include "fe_utils/string_utils.h"
#include "pg_upgrade.h"
/*
* new_9_0_populate_pg_largeobject_metadata()
* new >= 9.0, old <= 8.4
* 9.0 has a new pg_largeobject permission table
*/
void
new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode)
{
int dbnum;
FILE *script = NULL;
bool found = false;
char output_path[MAXPGPATH];
prep_status("Checking for large objects");
snprintf(output_path, sizeof(output_path), "pg_largeobject.sql");
for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
{
PGresult *res;
int i_count;
DbInfo *active_db = &cluster->dbarr.dbs[dbnum];
PGconn *conn = connectToServer(cluster, active_db->db_name);
/* find if there are any large objects */
res = executeQueryOrDie(conn,
"SELECT count(*) "
"FROM pg_catalog.pg_largeobject ");
i_count = PQfnumber(res, "count");
if (atoi(PQgetvalue(res, 0, i_count)) != 0)
{
found = true;
if (!check_mode)
{
PQExpBufferData connectbuf;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
pg_fatal("could not open file \"%s\": %s\n", output_path,
strerror(errno));
initPQExpBuffer(&connectbuf);
appendPsqlMetaConnect(&connectbuf, active_db->db_name);
fputs(connectbuf.data, script);
termPQExpBuffer(&connectbuf);
fprintf(script,
"SELECT pg_catalog.lo_create(t.loid)\n"
"FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n");
}
}
PQclear(res);
PQfinish(conn);
}
if (script)
fclose(script);
if (found)
{
report_status(PG_WARNING, "warning");
if (check_mode)
pg_log(PG_WARNING, "\n"
"Your installation contains large objects. The new database has an\n"
"additional large object permission table. After upgrading, you will be\n"
"given a command to populate the pg_largeobject_metadata table with\n"
"default permissions.\n\n");
else
pg_log(PG_WARNING, "\n"
"Your installation contains large objects. The new database has an\n"
"additional large object permission table, so default permissions must be\n"
"defined for all large objects. The file\n"
" %s\n"
"when executed by psql by the database superuser will set the default\n"
"permissions.\n\n",
output_path);
}
else
check_ok();
}
/*
* check_for_data_types_usage()
@@ -158,38 +76,32 @@ check_for_data_types_usage(ClusterInfo *cluster,
" t.oid = c.reltype AND "
" c.oid = a.attrelid AND "
" NOT a.attisdropped AND "
" a.atttypid = x.oid ",
base_query);
/* Ranges were introduced in 9.2 */
if (GET_MAJOR_VERSION(cluster->major_version) >= 902)
appendPQExpBufferStr(&querybuf,
" UNION ALL "
/* ranges containing any type selected so far */
" SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x "
" WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid");
appendPQExpBufferStr(&querybuf,
" ) foo "
") "
" a.atttypid = x.oid "
" UNION ALL "
/* ranges containing any type selected so far */
" SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x "
" WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid"
" ) foo "
") "
/* now look for stored columns of any such type */
"SELECT n.nspname, c.relname, a.attname "
"FROM pg_catalog.pg_class c, "
" pg_catalog.pg_namespace n, "
" pg_catalog.pg_attribute a "
"WHERE c.oid = a.attrelid AND "
" NOT a.attisdropped AND "
" a.atttypid IN (SELECT oid FROM oids) AND "
" c.relkind IN ("
CppAsString2(RELKIND_RELATION) ", "
CppAsString2(RELKIND_MATVIEW) ", "
CppAsString2(RELKIND_INDEX) ") AND "
" c.relnamespace = n.oid AND "
"SELECT n.nspname, c.relname, a.attname "
"FROM pg_catalog.pg_class c, "
" pg_catalog.pg_namespace n, "
" pg_catalog.pg_attribute a "
"WHERE c.oid = a.attrelid AND "
" NOT a.attisdropped AND "
" a.atttypid IN (SELECT oid FROM oids) AND "
" c.relkind IN ("
CppAsString2(RELKIND_RELATION) ", "
CppAsString2(RELKIND_MATVIEW) ", "
CppAsString2(RELKIND_INDEX) ") AND "
" c.relnamespace = n.oid AND "
/* exclude possible orphaned temp tables */
" n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
/* exclude system catalogs, too */
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
" n.nspname NOT IN ('pg_catalog', 'information_schema')",
base_query);
res = executeQueryOrDie(conn, "%s", querybuf.data);