Have pg_upgrade create its output files in the current directory, rather than in a subdirectory of the $HOME directory, or $TMP in Windows.
This commit is contained in:
Bruce Momjian 2010-06-12 17:05:29 +00:00
parent 99fdb4a9ea
commit 1dc7c796c9
8 changed files with 21 additions and 63 deletions

View file

@@ -381,7 +381,7 @@ create_script_for_old_cluster_deletion(migratorContext *ctx,
prep_status(ctx, "Creating script to delete old cluster");
snprintf(*deletion_script_file_name, MAXPGPATH, "%s/delete_old_cluster.%s",
ctx->output_dir, EXEC_EXT);
ctx->cwd, EXEC_EXT);
if ((script = fopen(*deletion_script_file_name, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n",

View file

@@ -21,7 +21,7 @@ generate_old_dump(migratorContext *ctx)
exec_prog(ctx, true,
SYSTEMQUOTE "\"%s/pg_dumpall\" --port %d --schema-only "
"--binary-upgrade > \"%s/" ALL_DUMP_FILE "\"" SYSTEMQUOTE,
ctx->new.bindir, ctx->old.port, ctx->output_dir);
ctx->new.bindir, ctx->old.port, ctx->cwd);
check_ok(ctx);
}
@@ -52,13 +52,13 @@ split_old_dump(migratorContext *ctx)
char filename[MAXPGPATH];
bool suppressed_username = false;
snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, ALL_DUMP_FILE);
snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, ALL_DUMP_FILE);
if ((all_dump = fopen(filename, "r")) == NULL)
pg_log(ctx, PG_FATAL, "Cannot open dump file %s\n", filename);
snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, GLOBALS_DUMP_FILE);
snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, GLOBALS_DUMP_FILE);
if ((globals_dump = fopen(filename, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Cannot write to dump file %s\n", filename);
snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, DB_DUMP_FILE);
snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, DB_DUMP_FILE);
if ((db_dump = fopen(filename, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Cannot write to dump file %s\n", filename);
current_output = globals_dump;

View file

@@ -213,7 +213,7 @@ check_loadable_libraries(migratorContext *ctx)
prep_status(ctx, "Checking for presence of required libraries");
snprintf(output_path, sizeof(output_path), "%s/loadable_libraries.txt",
ctx->output_dir);
ctx->cwd);
for (libnum = 0; libnum < ctx->num_libraries; libnum++)
{

View file

@@ -84,20 +84,7 @@ parseCommandLine(migratorContext *ctx, int argc, char *argv[])
if (user_id == 0)
pg_log(ctx, PG_FATAL, "%s: cannot be run as root\n", ctx->progname);
#ifndef WIN32
get_home_path(ctx->home_dir);
#else
{
char *tmppath;
/* TMP is the best place on Windows, rather than APPDATA */
if ((tmppath = getenv("TMP")) == NULL)
pg_log(ctx, PG_FATAL, "TMP environment variable is not set.\n");
snprintf(ctx->home_dir, MAXPGPATH, "%s", tmppath);
}
#endif
snprintf(ctx->output_dir, MAXPGPATH, "%s/" OUTPUT_SUBDIR, ctx->home_dir);
getcwd(ctx->cwd, MAXPGPATH);
while ((option = getopt_long(argc, argv, "d:D:b:B:cgG:kl:p:P:u:v",
long_options, &optindex)) != -1)

View file

@@ -18,7 +18,6 @@ static void copy_clog_xlog_xid(migratorContext *ctx);
static void set_frozenxids(migratorContext *ctx);
static void setup(migratorContext *ctx, char *argv0, bool live_check);
static void cleanup(migratorContext *ctx);
static void create_empty_output_directory(migratorContext *ctx);
int
@@ -37,8 +36,6 @@ main(int argc, char **argv)
setup(&ctx, argv[0], live_check);
create_empty_output_directory(&ctx);
check_cluster_versions(&ctx);
check_cluster_compatibility(&ctx, live_check);
@@ -201,7 +198,7 @@ prepare_new_databases(migratorContext *ctx)
exec_prog(ctx, true,
SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on --port %d "
"-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
ctx->new.bindir, ctx->new.port, ctx->output_dir,
ctx->new.bindir, ctx->new.port, ctx->cwd,
GLOBALS_DUMP_FILE, ctx->logfile);
check_ok(ctx);
@@ -223,7 +220,7 @@ create_new_objects(migratorContext *ctx)
exec_prog(ctx, true,
SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on --port %d "
"-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
ctx->new.bindir, ctx->new.port, ctx->output_dir,
ctx->new.bindir, ctx->new.port, ctx->cwd,
DB_DUMP_FILE, ctx->logfile);
check_ok(ctx);
@@ -399,33 +396,10 @@ cleanup(migratorContext *ctx)
if (ctx->debug_fd)
fclose(ctx->debug_fd);
snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, ALL_DUMP_FILE);
snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, ALL_DUMP_FILE);
unlink(filename);
snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, GLOBALS_DUMP_FILE);
snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, GLOBALS_DUMP_FILE);
unlink(filename);
snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, DB_DUMP_FILE);
snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, DB_DUMP_FILE);
unlink(filename);
}
/*
 * create_empty_output_directory
 *
 * Create an empty directory (ctx->output_dir) to hold pg_upgrade's
 * output files.  If the directory already exists, its contents are
 * removed so each run starts with a clean output area.
 */
static void
create_empty_output_directory(migratorContext *ctx)
{
/*
 * rmtree() outputs a warning if the directory does not exist,
 * so we try to create the directory first.  If mkdir() fails with
 * EEXIST, the directory is already there and we clear it out;
 * rmtree(..., false) presumably removes only the contents, leaving
 * the top directory in place — confirm against the project's
 * rmtree() implementation.  Any other mkdir() failure is fatal.
 */
if (mkdir(ctx->output_dir, S_IRWXU) != 0)
{
if (errno == EEXIST)
rmtree(ctx->output_dir, false);
else
pg_log(ctx, PG_FATAL, "Cannot create subdirectory %s: %s\n",
ctx->output_dir, getErrorText(errno));
}
}

View file

@@ -29,8 +29,6 @@
#define OVERWRITE_MESSAGE " %-" MESSAGE_WIDTH "." MESSAGE_WIDTH "s\r"
#define GET_MAJOR_VERSION(v) ((v) / 100)
#define OUTPUT_SUBDIR "pg_upgrade_output"
#define ALL_DUMP_FILE "pg_upgrade_dump_all.sql"
/* contains both global db information and CREATE DATABASE commands */
#define GLOBALS_DUMP_FILE "pg_upgrade_dump_globals.sql"
@@ -217,8 +215,7 @@ typedef struct
const char *progname; /* complete pathname for this program */
char *exec_path; /* full path to my executable */
char *user; /* username for clusters */
char home_dir[MAXPGPATH]; /* name of user's home directory */
char output_dir[MAXPGPATH]; /* directory for pg_upgrade output */
char cwd[MAXPGPATH]; /* directory for pg_upgrade output */
char **tablespaces; /* tablespaces */
int num_tablespaces;
char **libraries; /* loadable libraries */

View file

@@ -28,7 +28,7 @@ new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx, bool check_mode,
prep_status(ctx, "Checking for large objects");
snprintf(output_path, sizeof(output_path), "%s/pg_largeobject.sql",
ctx->output_dir);
ctx->cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{

View file

@@ -28,7 +28,7 @@ old_8_3_check_for_name_data_type_usage(migratorContext *ctx, Cluster whichCluste
prep_status(ctx, "Checking for invalid 'name' user columns");
snprintf(output_path, sizeof(output_path), "%s/tables_using_name.txt",
ctx->output_dir);
ctx->cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@@ -123,7 +123,7 @@ old_8_3_check_for_tsquery_usage(migratorContext *ctx, Cluster whichCluster)
prep_status(ctx, "Checking for tsquery user columns");
snprintf(output_path, sizeof(output_path), "%s/tables_using_tsquery.txt",
ctx->output_dir);
ctx->cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@@ -222,7 +222,7 @@ old_8_3_check_for_isn_and_int8_passing_mismatch(migratorContext *ctx, Cluster wh
}
snprintf(output_path, sizeof(output_path), "%s/contrib_isn_and_int8_pass_by_value.txt",
ctx->output_dir);
ctx->cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@@ -312,7 +312,7 @@ old_8_3_rebuild_tsvector_tables(migratorContext *ctx, bool check_mode,
prep_status(ctx, "Checking for tsvector user columns");
snprintf(output_path, sizeof(output_path), "%s/rebuild_tsvector_tables.sql",
ctx->output_dir);
ctx->cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@@ -457,7 +457,7 @@ old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx, bool check_mode,
prep_status(ctx, "Checking for hash and gin indexes");
snprintf(output_path, sizeof(output_path), "%s/reindex_hash_and_gin.sql",
ctx->output_dir);
ctx->cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@@ -573,7 +573,7 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx, bool check_m
prep_status(ctx, "Checking for bpchar_pattern_ops indexes");
snprintf(output_path, sizeof(output_path), "%s/reindex_bpchar_ops.sql",
ctx->output_dir);
ctx->cwd);
for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
{
@@ -705,7 +705,7 @@ old_8_3_create_sequence_script(migratorContext *ctx, Cluster whichCluster)
bool found = false;
char *output_path = pg_malloc(ctx, MAXPGPATH);
snprintf(output_path, MAXPGPATH, "%s/adjust_sequences.sql", ctx->output_dir);
snprintf(output_path, MAXPGPATH, "%s/adjust_sequences.sql", ctx->cwd);
prep_status(ctx, "Creating script to adjust sequences");