From 1f7d26f809c2be82a8b816944b8da8cd2c4507d7 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Tue, 4 Jan 2022 09:53:52 +0300 Subject: [PATCH 001/339] [refactor] remove pgut_fopen() --- src/utils/configuration.c | 10 ++++++++-- src/utils/pgut.c | 19 +------------------ src/utils/pgut.h | 5 ----- 3 files changed, 9 insertions(+), 25 deletions(-) diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 04bfbbe3b..8c5127a60 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -555,8 +555,14 @@ config_read_opt(const char *path, ConfigOption options[], int elevel, if (!options) return parsed_options; - if ((fp = pgut_fopen(path, "rt", missing_ok)) == NULL) - return parsed_options; + if ((fp = fio_open_stream(path, FIO_BACKUP_HOST)) == NULL) + { + if (missing_ok && errno == ENOENT) + return parsed_options; + + elog(ERROR, "could not open file \"%s\": %s", + path, strerror(errno)); + } while (fgets(buf, lengthof(buf), fp)) { diff --git a/src/utils/pgut.c b/src/utils/pgut.c index 2cf0ccbe7..c220b807d 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -980,7 +980,7 @@ pgut_strndup(const char *str, size_t n) /* * Allocates new string, that contains part of filepath string minus trailing filename string * If trailing filename string not found, returns copy of filepath. - * Result must be free by caller. + * Result must be freed by caller. 
*/ char * pgut_str_strip_trailing_filename(const char *filepath, const char *filename) @@ -993,23 +993,6 @@ pgut_str_strip_trailing_filename(const char *filepath, const char *filename) return pgut_strndup(filepath, fp_len); } -FILE * -pgut_fopen(const char *path, const char *mode, bool missing_ok) -{ - FILE *fp; - - if ((fp = fio_open_stream(path, FIO_BACKUP_HOST)) == NULL) - { - if (missing_ok && errno == ENOENT) - return NULL; - - elog(ERROR, "could not open file \"%s\": %s", - path, strerror(errno)); - } - - return fp; -} - #ifdef WIN32 static int select_win32(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, const struct timeval * timeout); #define select select_win32 diff --git a/src/utils/pgut.h b/src/utils/pgut.h index fa0efe816..638259a3c 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -69,11 +69,6 @@ extern char *pgut_str_strip_trailing_filename(const char *filepath, const char * #define pgut_new0(type) ((type *) pgut_malloc0(sizeof(type))) #define pgut_newarray(type, n) ((type *) pgut_malloc(sizeof(type) * (n))) -/* - * file operations - */ -extern FILE *pgut_fopen(const char *path, const char *mode, bool missing_ok); - /* * Assert */ From f22c5fcb6b56109690a1febe8327908c86b1fbbc Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 7 Jan 2022 01:38:51 +0300 Subject: [PATCH 002/339] [refactoring] remove duplicate fio_unlink() and fio_delete() and merge into new fio_remove() with proper error checking --- src/archive.c | 74 +++++++++++++++++++++----------- src/catalog.c | 31 +++++++------ src/catchup.c | 8 ++-- src/delete.c | 15 +++---- src/dir.c | 46 ++++---------------- src/merge.c | 16 ++++--- src/pg_probackup.h | 10 ++--- src/restore.c | 8 ++-- src/utils/file.c | 105 ++++++++++++++++++++++++--------------------- src/utils/file.h | 5 +-- 10 files changed, 164 insertions(+), 154 deletions(-) diff --git a/src/archive.c b/src/archive.c index 0f32d9345..9d8d92fde 100644 --- a/src/archive.c +++ b/src/archive.c @@ -3,7 +3,7 @@ * archive.c: - pg_probackup specific archive commands for archive backups. * * - * Portions Copyright (c) 2018-2021, Postgres Professional + * Portions Copyright (c) 2018-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -497,7 +497,8 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* Partial segment is considered stale, so reuse it */ elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_part); - fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + elog(ERROR, "Cannot remove stale temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); out = fio_open(to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, FIO_BACKUP_HOST); if (out < 0) @@ -522,7 +523,8 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* cleanup */ fclose(in); fio_close(out); - fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot remove temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); return 1; } else @@ -535,7 +537,8 @@ push_file_internal_uncompressed(const char *wal_file_name, const char 
*pg_xlog_d /* Overwriting is forbidden, * so we must unlink partial file and exit with error. */ - fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot remove temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); elog(ERROR, "WAL file already exists in archive with " "different checksum: \"%s\"", to_fullpath); } @@ -552,16 +555,20 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (ferror(in)) { - fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + int save_errno = errno; + if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot remove temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); elog(ERROR, "Cannot read source file \"%s\": %s", - from_fullpath, strerror(errno)); + from_fullpath, strerror(save_errno)); } if (read_len > 0 && fio_write_async(out, buf, read_len) != read_len) { - fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + int save_errno = errno; + if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot cleanup temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); elog(ERROR, "Cannot write to destination temp file \"%s\": %s", - to_fullpath_part, strerror(errno)); + to_fullpath_part, strerror(save_errno)); } if (feof(in)) @@ -574,7 +581,8 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* Writing is asynchronous in case of push in remote mode, so check agent status */ if (fio_check_error_fd(out, &errmsg)) { - fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot cleanup temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); elog(ERROR, "Cannot write to the remote file \"%s\": %s", to_fullpath_part, errmsg); } @@ -582,9 +590,11 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* close temp file */ 
if (fio_close(out) != 0) { - fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + int save_errno = errno; + if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot cleanup temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); elog(ERROR, "Cannot close temp WAL file \"%s\": %s", - to_fullpath_part, strerror(errno)); + to_fullpath_part, strerror(save_errno)); } /* sync temp file to disk */ @@ -602,9 +612,11 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* Rename temp file to destination file */ if (fio_rename(to_fullpath_part, to_fullpath, FIO_BACKUP_HOST) < 0) { - fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + int save_errno = errno; + if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot cleanup temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", - to_fullpath_part, to_fullpath, strerror(errno)); + to_fullpath_part, to_fullpath, strerror(save_errno)); } pg_free(buf); @@ -743,7 +755,8 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* Partial segment is considered stale, so reuse it */ elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_gz_part); - fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + elog(ERROR, "Cannot remove stale compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); out = fio_gzopen(to_fullpath_gz_part, PG_BINARY_W, compress_level, FIO_BACKUP_HOST); if (out == NULL) @@ -771,7 +784,8 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* cleanup */ fclose(in); fio_gzclose(out); - fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot remove compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); return 1; } else @@ 
-784,7 +798,8 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* Overwriting is forbidden, * so we must unlink partial file and exit with error. */ - fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot remove compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); elog(ERROR, "WAL file already exists in archive with " "different checksum: \"%s\"", to_fullpath_gz); } @@ -801,16 +816,20 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (ferror(in)) { - fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + int save_errno = errno; + if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot remove compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); elog(ERROR, "Cannot read from source file \"%s\": %s", - from_fullpath, strerror(errno)); + from_fullpath, strerror(save_errno)); } if (read_len > 0 && fio_gzwrite(out, buf, read_len) != read_len) { - fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + int save_errno = errno; + if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot cleanup compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); elog(ERROR, "Cannot write to compressed temp WAL file \"%s\": %s", - to_fullpath_gz_part, get_gz_error(out, errno)); + to_fullpath_gz_part, get_gz_error(out, save_errno)); } if (feof(in)) @@ -823,7 +842,8 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* Writing is asynchronous in case of push in remote mode, so check agent status */ if (fio_check_error_fd_gz(out, &errmsg)) { - fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot cleanup remote compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); elog(ERROR, "Cannot 
write to the remote compressed file \"%s\": %s", to_fullpath_gz_part, errmsg); } @@ -831,9 +851,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* close temp file, TODO: make it synchronous */ if (fio_gzclose(out) != 0) { - fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + int save_errno = errno; + if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot cleanup compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); elog(ERROR, "Cannot close compressed temp WAL file \"%s\": %s", - to_fullpath_gz_part, strerror(errno)); + to_fullpath_gz_part, strerror(save_errno)); } /* sync temp file to disk */ @@ -852,9 +874,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* Rename temp file to destination file */ if (fio_rename(to_fullpath_gz_part, to_fullpath_gz, FIO_BACKUP_HOST) < 0) { - fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + int save_errno = errno; + if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot cleanup compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", - to_fullpath_gz_part, to_fullpath_gz, strerror(errno)); + to_fullpath_gz_part, to_fullpath_gz, strerror(save_errno)); } pg_free(buf); diff --git a/src/catalog.c b/src/catalog.c index b4ed8c189..c575ba4e4 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -3,7 +3,7 @@ * catalog.c: backup catalog operation * * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -451,7 +451,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) * it. Need a loop because of possible race condition against other * would-be creators. 
*/ - if (fio_unlink(lock_file, FIO_BACKUP_HOST) < 0) + if (fio_remove(lock_file, false, FIO_BACKUP_HOST) < 0) { if (errno == ENOENT) continue; /* race condition, again */ @@ -476,7 +476,8 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) int save_errno = errno; fio_close(fd); - fio_unlink(lock_file, FIO_BACKUP_HOST); + if (fio_remove(lock_file, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot remove lock file \"%s\": %s", lock_file, strerror(errno)); /* In lax mode if we failed to grab lock because of 'out of space error', * then treat backup as locked. @@ -494,7 +495,8 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) int save_errno = errno; fio_close(fd); - fio_unlink(lock_file, FIO_BACKUP_HOST); + if (fio_remove(lock_file, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot remove lock file \"%s\": %s", lock_file, strerror(errno)); /* In lax mode if we failed to grab lock because of 'out of space error', * then treat backup as locked. 
@@ -511,9 +513,10 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) { int save_errno = errno; - fio_unlink(lock_file, FIO_BACKUP_HOST); + if (fio_remove(lock_file, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot remove lock file \"%s\": %s", lock_file, strerror(errno)); - if (!strict && errno == ENOSPC) + if (!strict && save_errno == ENOSPC) return LOCK_FAIL_ENOSPC; else elog(ERROR, "Could not close lock file \"%s\": %s", @@ -608,8 +611,9 @@ wait_shared_owners(pgBackup *backup) return 1; } - /* unlink shared lock file */ - fio_unlink(lock_file, FIO_BACKUP_HOST); + /* remove shared lock file */ + if (fio_remove(lock_file, true, FIO_BACKUP_HOST) != 0) + elog(ERROR, "Cannot remove shared lock file \"%s\": %s", lock_file, strerror(errno)); return 0; } @@ -727,8 +731,9 @@ release_excl_lock_file(const char *backup_dir) /* TODO Sanity check: maybe we should check, that pid in lock file is my_pid */ - /* unlink pid file */ - fio_unlink(lock_file, FIO_BACKUP_HOST); + /* remove pid file */ + if (fio_remove(lock_file, false, FIO_BACKUP_HOST) != 0) + elog(ERROR, "Cannot remove exclusive lock file \"%s\": %s", lock_file, strerror(errno)); } void @@ -792,7 +797,8 @@ release_shared_lock_file(const char *backup_dir) /* if there is no active pid left, then there is nothing to do */ if (buffer_len == 0) { - fio_unlink(lock_file, FIO_BACKUP_HOST); + if (fio_remove(lock_file, false, FIO_BACKUP_HOST) != 0) + elog(ERROR, "Cannot remove shared lock file \"%s\": %s", lock_file, strerror(errno)); return; } @@ -2462,7 +2468,8 @@ write_backup(pgBackup *backup, bool strict) if (!strict && (save_errno == ENOSPC)) { fclose(fp); - fio_unlink(path_temp, FIO_BACKUP_HOST); + if (fio_remove(path_temp, false, FIO_BACKUP_HOST) != 0) + elog(elevel, "Additionally cannot remove file \"%s\": %s", path_temp, strerror(errno)); return; } } diff --git a/src/catchup.c b/src/catchup.c index 1b8f8084d..04b6150fe 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -2,7 +2,7 @@ * 
* catchup.c: sync DB cluster * - * Copyright (c) 2021, Postgres Professional + * Copyright (c) 2021-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -930,8 +930,10 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char fullpath[MAXPGPATH]; join_path_components(fullpath, dest_pgdata, file->rel_path); - fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); - elog(VERBOSE, "Deleted file \"%s\"", fullpath); + if (fio_remove(fullpath, false, FIO_LOCAL_HOST) == 0) + elog(VERBOSE, "Deleted file \"%s\"", fullpath); + else + elog(ERROR, "Cannot delete redundant file in destination \"%s\": %s", fullpath, strerror(errno)); /* shrink dest pgdata list */ pgFileFree(file); diff --git a/src/delete.c b/src/delete.c index 6c70ff81e..98e277ae9 100644 --- a/src/delete.c +++ b/src/delete.c @@ -3,7 +3,7 @@ * delete.c: delete backup files. * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -782,7 +782,8 @@ delete_backup_files(pgBackup *backup) elog(INFO, "Progress: (%zd/%zd). 
Delete file \"%s\"", i + 1, num_files, full_path); - pgFileDelete(file->mode, full_path); + if (fio_remove(full_path, false, FIO_BACKUP_HOST) != 0) + elog(ERROR, "Cannot remove file or directory \"%s\": %s", full_path, strerror(errno)); } parray_walk(files, pgFileFree); @@ -948,13 +949,11 @@ delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timeli continue; } - /* unlink segment */ - if (fio_unlink(wal_fullpath, FIO_BACKUP_HOST) < 0) + /* remove segment, missing file is not considered as error condition */ + if (fio_remove(wal_fullpath, true, FIO_BACKUP_HOST) < 0) { - /* Missing file is not considered as error condition */ - if (errno != ENOENT) - elog(ERROR, "Could not remove file \"%s\": %s", - wal_fullpath, strerror(errno)); + elog(ERROR, "Could not remove file \"%s\": %s", + wal_fullpath, strerror(errno)); } else { diff --git a/src/dir.c b/src/dir.c index 4ebe0939b..cb1f2d102 100644 --- a/src/dir.c +++ b/src/dir.c @@ -3,7 +3,7 @@ * dir.c: directory operation utility. * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -226,38 +226,6 @@ pgFileInit(const char *rel_path) return file; } -/* - * Delete file pointed by the pgFile. - * If the pgFile points directory, the directory must be empty. 
- */ -void -pgFileDelete(mode_t mode, const char *full_path) -{ - if (S_ISDIR(mode)) - { - if (rmdir(full_path) == -1) - { - if (errno == ENOENT) - return; - else if (errno == ENOTDIR) /* could be symbolic link */ - goto delete_file; - - elog(ERROR, "Cannot remove directory \"%s\": %s", - full_path, strerror(errno)); - } - return; - } - -delete_file: - if (remove(full_path) == -1) - { - if (errno == ENOENT) - return; - elog(ERROR, "Cannot remove file \"%s\": %s", full_path, - strerror(errno)); - } -} - /* * Read the local file to compute its CRC. * We cannot make decision about file decompression because @@ -1792,9 +1760,11 @@ write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_ print_database_map(fp, database_map); if (fio_fflush(fp) || fio_fclose(fp)) { - fio_unlink(database_map_path, FIO_BACKUP_HOST); + int save_errno = errno; + if (fio_remove(database_map_path, false, FIO_BACKUP_HOST) != 0) + elog(WARNING, "Cannot cleanup database map \"%s\": %s", database_map_path, strerror(errno)); elog(ERROR, "Cannot write database map \"%s\": %s", - database_map_path, strerror(errno)); + database_map_path, strerror(save_errno)); } /* Add metadata to backup_content.control */ @@ -1888,8 +1858,10 @@ cleanup_tablespace(const char *path) join_path_components(fullpath, path, file->rel_path); - fio_delete(file->mode, fullpath, FIO_DB_HOST); - elog(VERBOSE, "Deleted file \"%s\"", fullpath); + if (fio_remove(fullpath, false, FIO_DB_HOST) == 0) + elog(VERBOSE, "Deleted file \"%s\"", fullpath); + else + elog(ERROR, "Cannot delete file or directory \"%s\": %s", fullpath, strerror(errno)); } parray_walk(files, pgFileFree); diff --git a/src/merge.c b/src/merge.c index ff39c2510..57c4096cf 100644 --- a/src/merge.c +++ b/src/merge.c @@ -2,7 +2,7 @@ * * merge.c: merge FULL and incremental backups * - * Copyright (c) 2018-2019, Postgres Professional + * Copyright (c) 2018-2022, Postgres Professional * 
*------------------------------------------------------------------------- */ @@ -809,8 +809,10 @@ merge_chain(InstanceState *instanceState, /* We need full path, file object has relative path */ join_path_components(full_file_path, full_database_dir, full_file->rel_path); - pgFileDelete(full_file->mode, full_file_path); - elog(VERBOSE, "Deleted \"%s\"", full_file_path); + if (fio_remove(full_file_path, false, FIO_BACKUP_HOST) == 0) + elog(VERBOSE, "Deleted \"%s\"", full_file_path); + else + elog(ERROR, "Cannot delete file or directory \"%s\": %s", full_file_path, strerror(errno)); } } @@ -1143,8 +1145,10 @@ remove_dir_with_files(const char *path) join_path_components(full_path, path, file->rel_path); - pgFileDelete(file->mode, full_path); - elog(VERBOSE, "Deleted \"%s\"", full_path); + if (fio_remove(full_path, false, FIO_LOCAL_HOST) == 0) + elog(VERBOSE, "Deleted \"%s\"", full_path); + else + elog(ERROR, "Cannot delete file or directory \"%s\": %s", full_path, strerror(errno)); } /* cleanup */ @@ -1450,4 +1454,4 @@ is_forward_compatible(parray *parent_chain) } return true; -} \ No newline at end of file +} diff --git a/src/pg_probackup.h b/src/pg_probackup.h index b202b6152..269f41adc 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -3,7 +3,7 @@ * pg_probackup.h: Backup/Recovery manager for PostgreSQL. 
* * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2021, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -341,8 +341,8 @@ typedef enum ShowFormat #define PROGRAM_VERSION "2.5.4" /* update when remote agent API or behaviour changes */ -#define AGENT_PROTOCOL_VERSION 20501 -#define AGENT_PROTOCOL_VERSION_STR "2.5.1" +#define AGENT_PROTOCOL_VERSION 20600 +#define AGENT_PROTOCOL_VERSION_STR "2.6.0" /* update only when changing storage format */ #define STORAGE_FORMAT_VERSION "2.4.4" @@ -1054,9 +1054,6 @@ extern pgFile *pgFileNew(const char *path, const char *rel_path, bool follow_symlink, int external_dir_num, fio_location location); extern pgFile *pgFileInit(const char *rel_path); -extern void pgFileDelete(mode_t mode, const char *full_path); -extern void fio_pgFileDelete(pgFile *file, const char *full_path); - extern void pgFileFree(void *file); extern pg_crc32 pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok); @@ -1213,7 +1210,6 @@ extern int copy_pages(const char *to_fullpath, const char *from_fullpath, /* FIO */ extern void setMyLocation(ProbackupSubcmd const subcmd); -extern void fio_delete(mode_t mode, const char *fullpath, fio_location location); extern int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, bool use_pagemap, BlockNumber *err_blknum, char **errormsg, diff --git a/src/restore.c b/src/restore.c index d8d808a4e..a31ffd983 100644 --- a/src/restore.c +++ b/src/restore.c @@ -3,7 +3,7 @@ * restore.c: restore DB cluster and archived WAL. 
* * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -922,8 +922,10 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, join_path_components(fullpath, pgdata_path, file->rel_path); - fio_delete(file->mode, fullpath, FIO_DB_HOST); - elog(VERBOSE, "Deleted file \"%s\"", fullpath); + if (fio_remove(fullpath, false, FIO_DB_HOST) == 0) + elog(VERBOSE, "Deleted file \"%s\"", fullpath); + else + elog(ERROR, "Cannot delete redundant file \"%s\": %s", fullpath, strerror(errno)); /* shrink pgdata list */ pgFileFree(file); diff --git a/src/utils/file.c b/src/utils/file.c index 7d1df554b..e97924caa 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -176,7 +176,9 @@ fio_safestat(const char *path, struct stat *buf) } #define stat(x, y) fio_safestat(x, y) +#endif /* WIN32 */ +#ifdef WIN32 /* TODO: use real pread on Linux */ static ssize_t pread(int fd, void* buf, size_t size, off_t off) @@ -186,15 +188,16 @@ pread(int fd, void* buf, size_t size, off_t off) return -1; return read(fd, buf, size); } +#endif /* WIN32 */ +#ifdef WIN32 static int remove_file_or_dir(char const* path) { int rc = remove(path); -#ifdef WIN32 + if (rc < 0 && errno == EACCESS) rc = rmdir(path); -#endif return rc; } #else @@ -1385,28 +1388,65 @@ fio_get_crc32(const char *file_path, fio_location location, bool decompress) } } -/* Remove file */ +/* + * Remove file or directory + * if missing_ok, then ignore ENOENT error + */ int -fio_unlink(char const* path, fio_location location) +fio_remove(char const* path, bool missing_ok, fio_location location) { + int result = 0; + if (fio_is_remote(location)) { - fio_header hdr; - size_t path_len = strlen(path) + 1; - hdr.cop = FIO_UNLINK; - hdr.handle = -1; - hdr.size = path_len; + fio_header hdr = { + .cop = FIO_REMOVE, + 
.handle = -1, + .size = strlen(path) + 1, + .arg = missing_ok ? 1 : 0, + }; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, path, path_len), path_len); + IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); - // TODO: error is swallowed ? - return 0; + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + Assert(hdr.cop == FIO_REMOVE); + + if (hdr.arg != 0) + { + errno = hdr.arg; + result = -1; + } } else { - return remove_file_or_dir(path); + if (remove_file_or_dir(path) != 0) + { + if (!missing_ok || errno != ENOENT) + result = -1; + } } + return result; +} + + +static void +fio_remove_impl(char const* path, bool missing_ok, int out) +{ + fio_header hdr = { + .cop = FIO_REMOVE, + .handle = -1, + .size = 0, + .arg = 0, + }; + + if (remove_file_or_dir(path) != 0) + { + if (!missing_ok || errno != ENOENT) + hdr.arg = errno; + } + + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } /* Create directory @@ -3176,37 +3216,6 @@ fio_check_postmaster_impl(int out, char *buf) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } -/* - * Delete file pointed by the pgFile. - * If the pgFile points directory, the directory must be empty. 
- */ -void -fio_delete(mode_t mode, const char *fullpath, fio_location location) -{ - if (fio_is_remote(location)) - { - fio_header hdr; - - hdr.cop = FIO_DELETE; - hdr.size = strlen(fullpath) + 1; - hdr.arg = mode; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, fullpath, hdr.size), hdr.size); - - } - else - pgFileDelete(mode, fullpath); -} - -static void -fio_delete_impl(mode_t mode, char *buf) -{ - char *fullpath = (char*) buf; - - pgFileDelete(mode, fullpath); -} - /* Execute commands at remote host */ void fio_communicate(int in, int out) @@ -3333,8 +3342,8 @@ fio_communicate(int in, int out) case FIO_SYMLINK: /* Create symbolic link */ fio_symlink_impl(out, buf, hdr.arg > 0 ? true : false); break; - case FIO_UNLINK: /* Remove file or directory (TODO: Win32) */ - SYS_CHECK(remove_file_or_dir(buf)); + case FIO_REMOVE: /* Remove file or directory (TODO: Win32) */ + fio_remove_impl(buf, hdr.arg == 1, out); break; case FIO_MKDIR: /* Create directory */ hdr.size = 0; @@ -3396,10 +3405,6 @@ fio_communicate(int in, int out) /* calculate crc32 for a file */ fio_check_postmaster_impl(out, buf); break; - case FIO_DELETE: - /* delete file */ - fio_delete_impl(hdr.arg, buf); - break; case FIO_DISCONNECT: hdr.cop = FIO_DISCONNECTED; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); diff --git a/src/utils/file.h b/src/utils/file.h index a554b4ab0..3b06752c9 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -20,12 +20,11 @@ typedef enum FIO_SYNC, FIO_RENAME, FIO_SYMLINK, - FIO_UNLINK, + FIO_REMOVE, FIO_MKDIR, FIO_CHMOD, FIO_SEEK, FIO_TRUNCATE, - FIO_DELETE, FIO_PREAD, FIO_READ, FIO_LOAD, @@ -124,7 +123,7 @@ extern pg_crc32 fio_get_crc32(const char *file_path, fio_location location, bool extern int fio_rename(char const* old_path, char const* new_path, fio_location location); extern int fio_symlink(char const* target, char const* link_path, bool overwrite, fio_location location); -extern int 
fio_unlink(char const* path, fio_location location); +extern int fio_remove(char const* path, bool missing_ok, fio_location location); extern int fio_mkdir(char const* path, int mode, fio_location location); extern int fio_chmod(char const* path, int mode, fio_location location); extern int fio_access(char const* path, int mode, fio_location location); From 5a6bd0190953e0272e45c302d1fb696838761be5 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 7 Jan 2022 04:00:06 +0300 Subject: [PATCH 003/339] [refactoring] move fio_location argument in first place in fio_ functions (part 1) --- src/archive.c | 70 +++++++++++++++++++-------------------- src/backup.c | 14 ++++---- src/catalog.c | 30 ++++++++--------- src/catchup.c | 18 +++++----- src/configure.c | 6 ++-- src/data.c | 12 +++---- src/delete.c | 4 +-- src/dir.c | 24 +++++++------- src/fetch.c | 6 ++-- src/merge.c | 10 +++--- src/parsexlog.c | 12 +++---- src/pg_probackup.c | 8 ++--- src/restore.c | 38 ++++++++++----------- src/util.c | 6 ++-- src/utils/configuration.c | 4 +-- src/utils/file.c | 58 ++++++++++++++++---------------- src/utils/file.h | 59 ++++++++++++++++++++------------- 17 files changed, 196 insertions(+), 183 deletions(-) diff --git a/src/archive.c b/src/archive.c index 9d8d92fde..01ea15a1f 100644 --- a/src/archive.c +++ b/src/archive.c @@ -364,7 +364,7 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, elog(VERBOSE, "Rename \"%s\" to \"%s\"", wal_file_ready, wal_file_done); /* do not error out, if rename failed */ - if (fio_rename(wal_file_ready, wal_file_done, FIO_DB_HOST) < 0) + if (fio_rename(FIO_DB_HOST, wal_file_ready, wal_file_done) < 0) elog(WARNING, "Cannot rename ready file \"%s\" to \"%s\": %s", wal_file_ready, wal_file_done, strerror(errno)); } @@ -418,7 +418,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d snprintf(to_fullpath_part, sizeof(to_fullpath_part), "%s.part", to_fullpath); /* Grab lock by creating temp file in 
exclusive mode */ - out = fio_open(to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, FIO_BACKUP_HOST); + out = fio_open(FIO_BACKUP_HOST, to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY); if (out < 0) { if (errno != EEXIST) @@ -444,12 +444,12 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d while (partial_try_count < archive_timeout) { - if (fio_stat(to_fullpath_part, &st, false, FIO_BACKUP_HOST) < 0) + if (fio_stat(FIO_BACKUP_HOST, to_fullpath_part, &st, false) < 0) { if (errno == ENOENT) { //part file is gone, lets try to grab it - out = fio_open(to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, FIO_BACKUP_HOST); + out = fio_open(FIO_BACKUP_HOST, to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY); if (out < 0) { if (errno != EEXIST) @@ -497,10 +497,10 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* Partial segment is considered stale, so reuse it */ elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_part); - if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) elog(ERROR, "Cannot remove stale temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); - out = fio_open(to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, FIO_BACKUP_HOST); + out = fio_open(FIO_BACKUP_HOST, to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY); if (out < 0) elog(ERROR, "Cannot open temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); } @@ -513,8 +513,8 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d pg_crc32 crc32_src; pg_crc32 crc32_dst; - crc32_src = fio_get_crc32(from_fullpath, FIO_DB_HOST, false); - crc32_dst = fio_get_crc32(to_fullpath, FIO_BACKUP_HOST, false); + crc32_src = fio_get_crc32(FIO_DB_HOST, from_fullpath, false); + crc32_dst = fio_get_crc32(FIO_BACKUP_HOST, to_fullpath, false); if (crc32_src == crc32_dst) { @@ -523,7 +523,7 @@ 
push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* cleanup */ fclose(in); fio_close(out); - if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) elog(WARNING, "Cannot remove temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); return 1; } @@ -537,7 +537,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* Overwriting is forbidden, * so we must unlink partial file and exit with error. */ - if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) elog(WARNING, "Cannot remove temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); elog(ERROR, "WAL file already exists in archive with " "different checksum: \"%s\"", to_fullpath); @@ -556,7 +556,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (ferror(in)) { int save_errno = errno; - if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) elog(WARNING, "Cannot remove temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); elog(ERROR, "Cannot read source file \"%s\": %s", from_fullpath, strerror(save_errno)); @@ -565,7 +565,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (read_len > 0 && fio_write_async(out, buf, read_len) != read_len) { int save_errno = errno; - if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) elog(WARNING, "Cannot cleanup temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); elog(ERROR, "Cannot write to destination temp file \"%s\": %s", to_fullpath_part, strerror(save_errno)); @@ -581,7 +581,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* Writing is asynchronous in case of push in remote 
mode, so check agent status */ if (fio_check_error_fd(out, &errmsg)) { - if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) elog(WARNING, "Cannot cleanup temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); elog(ERROR, "Cannot write to the remote file \"%s\": %s", to_fullpath_part, errmsg); @@ -591,7 +591,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (fio_close(out) != 0) { int save_errno = errno; - if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) elog(WARNING, "Cannot cleanup temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); elog(ERROR, "Cannot close temp WAL file \"%s\": %s", to_fullpath_part, strerror(save_errno)); @@ -600,7 +600,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* sync temp file to disk */ if (!no_sync) { - if (fio_sync(to_fullpath_part, FIO_BACKUP_HOST) != 0) + if (fio_sync(FIO_BACKUP_HOST, to_fullpath_part) != 0) elog(ERROR, "Failed to sync file \"%s\": %s", to_fullpath_part, strerror(errno)); } @@ -610,10 +610,10 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d //copy_file_attributes(from_path, FIO_DB_HOST, to_path_temp, FIO_BACKUP_HOST, true); /* Rename temp file to destination file */ - if (fio_rename(to_fullpath_part, to_fullpath, FIO_BACKUP_HOST) < 0) + if (fio_rename(FIO_BACKUP_HOST, to_fullpath_part, to_fullpath) < 0) { int save_errno = errno; - if (fio_remove(to_fullpath_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) elog(WARNING, "Cannot cleanup temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", to_fullpath_part, to_fullpath, strerror(save_errno)); @@ -675,7 +675,7 @@ push_file_internal_gz(const char *wal_file_name, const char 
*pg_xlog_dir, setvbuf(in, NULL, _IONBF, BUFSIZ); /* Grab lock by creating temp file in exclusive mode */ - out = fio_gzopen(to_fullpath_gz_part, PG_BINARY_W, compress_level, FIO_BACKUP_HOST); + out = fio_gzopen(FIO_BACKUP_HOST, to_fullpath_gz_part, PG_BINARY_W, compress_level); if (out == NULL) { if (errno != EEXIST) @@ -701,12 +701,12 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, while (partial_try_count < archive_timeout) { - if (fio_stat(to_fullpath_gz_part, &st, false, FIO_BACKUP_HOST) < 0) + if (fio_stat(FIO_BACKUP_HOST, to_fullpath_gz_part, &st, false) < 0) { if (errno == ENOENT) { //part file is gone, lets try to grab it - out = fio_gzopen(to_fullpath_gz_part, PG_BINARY_W, compress_level, FIO_BACKUP_HOST); + out = fio_gzopen(FIO_BACKUP_HOST, to_fullpath_gz_part, PG_BINARY_W, compress_level); if (out == NULL) { if (errno != EEXIST) @@ -755,10 +755,10 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* Partial segment is considered stale, so reuse it */ elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_gz_part); - if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) elog(ERROR, "Cannot remove stale compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); - out = fio_gzopen(to_fullpath_gz_part, PG_BINARY_W, compress_level, FIO_BACKUP_HOST); + out = fio_gzopen(FIO_BACKUP_HOST, to_fullpath_gz_part, PG_BINARY_W, compress_level); if (out == NULL) elog(ERROR, "Cannot open temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); @@ -774,8 +774,8 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, pg_crc32 crc32_dst; /* TODO: what if one of them goes missing? 
*/ - crc32_src = fio_get_crc32(from_fullpath, FIO_DB_HOST, false); - crc32_dst = fio_get_crc32(to_fullpath_gz, FIO_BACKUP_HOST, true); + crc32_src = fio_get_crc32(FIO_DB_HOST, from_fullpath, false); + crc32_dst = fio_get_crc32(FIO_BACKUP_HOST, to_fullpath_gz, true); if (crc32_src == crc32_dst) { @@ -784,7 +784,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* cleanup */ fclose(in); fio_gzclose(out); - if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) elog(WARNING, "Cannot remove compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); return 1; } @@ -798,7 +798,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* Overwriting is forbidden, * so we must unlink partial file and exit with error. */ - if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) elog(WARNING, "Cannot remove compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); elog(ERROR, "WAL file already exists in archive with " "different checksum: \"%s\"", to_fullpath_gz); @@ -817,7 +817,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (ferror(in)) { int save_errno = errno; - if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) elog(WARNING, "Cannot remove compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); elog(ERROR, "Cannot read from source file \"%s\": %s", from_fullpath, strerror(save_errno)); @@ -826,7 +826,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (read_len > 0 && fio_gzwrite(out, buf, read_len) != read_len) { int save_errno = errno; - if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) 
!= 0) elog(WARNING, "Cannot cleanup compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); elog(ERROR, "Cannot write to compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, get_gz_error(out, save_errno)); @@ -842,7 +842,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* Writing is asynchronous in case of push in remote mode, so check agent status */ if (fio_check_error_fd_gz(out, &errmsg)) { - if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) elog(WARNING, "Cannot cleanup remote compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); elog(ERROR, "Cannot write to the remote compressed file \"%s\": %s", to_fullpath_gz_part, errmsg); @@ -852,7 +852,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (fio_gzclose(out) != 0) { int save_errno = errno; - if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) elog(WARNING, "Cannot cleanup compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); elog(ERROR, "Cannot close compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(save_errno)); @@ -861,7 +861,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* sync temp file to disk */ if (!no_sync) { - if (fio_sync(to_fullpath_gz_part, FIO_BACKUP_HOST) != 0) + if (fio_sync(FIO_BACKUP_HOST, to_fullpath_gz_part) != 0) elog(ERROR, "Failed to sync file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); } @@ -872,10 +872,10 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, //copy_file_attributes(from_path, FIO_DB_HOST, to_path_temp, FIO_BACKUP_HOST, true); /* Rename temp file to destination file */ - if (fio_rename(to_fullpath_gz_part, to_fullpath_gz, FIO_BACKUP_HOST) < 0) + if (fio_rename(FIO_BACKUP_HOST, to_fullpath_gz_part, 
to_fullpath_gz) < 0) { int save_errno = errno; - if (fio_remove(to_fullpath_gz_part, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) elog(WARNING, "Cannot cleanup compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", to_fullpath_gz_part, to_fullpath_gz, strerror(save_errno)); @@ -913,7 +913,7 @@ get_gz_error(gzFile gzf, int errnum) //{ // struct stat st; // -// if (fio_stat(from_path, &st, true, from_location) == -1) +// if (fio_stat(from_location, from_path, &st, true) == -1) // { // if (unlink_on_error) // fio_unlink(to_path, to_location); @@ -921,7 +921,7 @@ get_gz_error(gzFile gzf, int errnum) // from_path, strerror(errno)); // } // -// if (fio_chmod(to_path, st.st_mode, to_location) == -1) +// if (fio_chmod(to_location, to_path, st.st_mode) == -1) // { // if (unlink_on_error) // fio_unlink(to_path, to_location); diff --git a/src/backup.c b/src/backup.c index c575865c4..2a5d0a30f 100644 --- a/src/backup.c +++ b/src/backup.c @@ -3,7 +3,7 @@ * backup.c: backup DB cluster, archived WAL * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -242,7 +242,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, if (current.backup_mode == BACKUP_MODE_DIFF_PAGE || !current.stream) { /* Check that archive_dir can be reached */ - if (fio_access(instanceState->instance_wal_subdir_path, F_OK, FIO_BACKUP_HOST) != 0) + if (fio_access(FIO_BACKUP_HOST, instanceState->instance_wal_subdir_path, F_OK) != 0) elog(ERROR, "WAL archive directory is not accessible \"%s\": %s", instanceState->instance_wal_subdir_path, strerror(errno)); @@ -260,7 +260,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, char 
stream_xlog_path[MAXPGPATH]; join_path_components(stream_xlog_path, current.database_dir, PG_XLOG_DIR); - fio_mkdir(stream_xlog_path, DIR_PERMISSION, FIO_BACKUP_HOST); + fio_mkdir(FIO_BACKUP_HOST, stream_xlog_path, DIR_PERMISSION); start_WAL_streaming(backup_conn, stream_xlog_path, &instance_config.conn_opt, current.start_lsn, current.tli, true); @@ -413,7 +413,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, join_path_components(dirpath, current.database_dir, file->rel_path); elog(VERBOSE, "Create directory '%s'", dirpath); - fio_mkdir(dirpath, DIR_PERMISSION, FIO_BACKUP_HOST); + fio_mkdir(FIO_BACKUP_HOST, dirpath, DIR_PERMISSION); } } @@ -528,7 +528,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, { cleanup_header_map(&(current.hdr_map)); - if (fio_sync(current.hdr_map.path, FIO_BACKUP_HOST) != 0) + if (fio_sync(FIO_BACKUP_HOST, current.hdr_map.path) != 0) elog(ERROR, "Cannot sync file \"%s\": %s", current.hdr_map.path, strerror(errno)); } @@ -587,7 +587,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, join_path_components(to_fullpath, external_dst, file->rel_path); } - if (fio_sync(to_fullpath, FIO_BACKUP_HOST) != 0) + if (fio_sync(FIO_BACKUP_HOST, to_fullpath) != 0) elog(ERROR, "Cannot sync file \"%s\": %s", to_fullpath, strerror(errno)); } @@ -1788,7 +1788,7 @@ pg_stop_backup_write_file_helper(const char *path, const char *filename, const c char full_filename[MAXPGPATH]; join_path_components(full_filename, path, filename); - fp = fio_fopen(full_filename, PG_BINARY_W, FIO_BACKUP_HOST); + fp = fio_fopen(FIO_BACKUP_HOST, full_filename, PG_BINARY_W); if (fp == NULL) elog(ERROR, "can't open %s file \"%s\": %s", error_msg_filename, full_filename, strerror(errno)); diff --git a/src/catalog.c b/src/catalog.c index c575ba4e4..af7e3b6d5 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -326,7 +326,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) * Think not to make the file 
protection weaker than 0600. See * comments below. */ - fd = fio_open(lock_file, O_RDWR | O_CREAT | O_EXCL, FIO_BACKUP_HOST); + fd = fio_open(FIO_BACKUP_HOST, lock_file, O_RDWR | O_CREAT | O_EXCL); if (fd >= 0) break; /* Success; exit the retry loop */ @@ -451,7 +451,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) * it. Need a loop because of possible race condition against other * would-be creators. */ - if (fio_remove(lock_file, false, FIO_BACKUP_HOST) < 0) + if (fio_remove(FIO_BACKUP_HOST, lock_file, false) < 0) { if (errno == ENOENT) continue; /* race condition, again */ @@ -476,7 +476,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) int save_errno = errno; fio_close(fd); - if (fio_remove(lock_file, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, lock_file, false) != 0) elog(WARNING, "Cannot remove lock file \"%s\": %s", lock_file, strerror(errno)); /* In lax mode if we failed to grab lock because of 'out of space error', @@ -495,7 +495,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) int save_errno = errno; fio_close(fd); - if (fio_remove(lock_file, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, lock_file, false) != 0) elog(WARNING, "Cannot remove lock file \"%s\": %s", lock_file, strerror(errno)); /* In lax mode if we failed to grab lock because of 'out of space error', @@ -513,7 +513,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) { int save_errno = errno; - if (fio_remove(lock_file, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, lock_file, false) != 0) elog(WARNING, "Cannot remove lock file \"%s\": %s", lock_file, strerror(errno)); if (!strict && save_errno == ENOSPC) @@ -612,7 +612,7 @@ wait_shared_owners(pgBackup *backup) } /* remove shared lock file */ - if (fio_remove(lock_file, true, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, lock_file, true) != 0) 
elog(ERROR, "Cannot remove shared lock file \"%s\": %s", lock_file, strerror(errno)); return 0; } @@ -732,7 +732,7 @@ release_excl_lock_file(const char *backup_dir) /* TODO Sanity check: maybe we should check, that pid in lock file is my_pid */ /* remove pid file */ - if (fio_remove(lock_file, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, lock_file, false) != 0) elog(ERROR, "Cannot remove exclusive lock file \"%s\": %s", lock_file, strerror(errno)); } @@ -797,7 +797,7 @@ release_shared_lock_file(const char *backup_dir) /* if there is no active pid left, then there is nothing to do */ if (buffer_len == 0) { - if (fio_remove(lock_file, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, lock_file, false) != 0) elog(ERROR, "Cannot remove shared lock file \"%s\": %s", lock_file, strerror(errno)); return; } @@ -852,7 +852,7 @@ IsDir(const char *dirpath, const char *entry, fio_location location) join_path_components(path, dirpath, entry); - return fio_stat(path, &st, false, location) == 0 && S_ISDIR(st.st_mode); + return fio_stat(location, path, &st, false) == 0 && S_ISDIR(st.st_mode); } /* @@ -939,7 +939,7 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id int i; /* open backup instance backups directory */ - data_dir = fio_opendir(instanceState->instance_backup_subdir_path, FIO_BACKUP_HOST); + data_dir = fio_opendir(FIO_BACKUP_HOST, instanceState->instance_backup_subdir_path); if (data_dir == NULL) { elog(WARNING, "cannot open directory \"%s\": %s", instanceState->instance_backup_subdir_path, @@ -1056,7 +1056,7 @@ get_backup_filelist(pgBackup *backup, bool strict) join_path_components(backup_filelist_path, backup->root_dir, DATABASE_FILE_LIST); - fp = fio_open_stream(backup_filelist_path, FIO_BACKUP_HOST); + fp = fio_open_stream(FIO_BACKUP_HOST, backup_filelist_path); if (fp == NULL) elog(ERROR, "cannot open \"%s\": %s", backup_filelist_path, strerror(errno)); @@ -1467,7 +1467,7 @@ 
pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path) char path[MAXPGPATH]; join_path_components(path, backup->root_dir, parray_get(subdirs, i)); - fio_mkdir(path, DIR_PERMISSION, FIO_BACKUP_HOST); + fio_mkdir(FIO_BACKUP_HOST, path, DIR_PERMISSION); } free_dir_list(subdirs); @@ -2468,7 +2468,7 @@ write_backup(pgBackup *backup, bool strict) if (!strict && (save_errno == ENOSPC)) { fclose(fp); - if (fio_remove(path_temp, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, path_temp, false) != 0) elog(elevel, "Additionally cannot remove file \"%s\": %s", path_temp, strerror(errno)); return; } @@ -2478,7 +2478,7 @@ write_backup(pgBackup *backup, bool strict) elog(ERROR, "Cannot close control file \"%s\": %s", path_temp, strerror(errno)); - if (fio_sync(path_temp, FIO_BACKUP_HOST) < 0) + if (fio_sync(FIO_BACKUP_HOST, path_temp) < 0) elog(ERROR, "Cannot sync control file \"%s\": %s", path_temp, strerror(errno)); @@ -2678,7 +2678,7 @@ readBackupControlFile(const char *path) }; pgBackupInit(backup); - if (fio_access(path, F_OK, FIO_BACKUP_HOST) != 0) + if (fio_access(FIO_BACKUP_HOST, path, F_OK) != 0) { elog(WARNING, "Control file \"%s\" doesn't exist", path); pgBackupFree(backup); diff --git a/src/catchup.c b/src/catchup.c index 04b6150fe..f100964ef 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -154,7 +154,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, char backup_label_filename[MAXPGPATH]; join_path_components(backup_label_filename, dest_pgdata, PG_BACKUP_LABEL_FILE); - if (fio_access(backup_label_filename, F_OK, FIO_LOCAL_HOST) == 0) + if (fio_access(FIO_LOCAL_HOST, backup_label_filename, F_OK) == 0) elog(ERROR, "Destination directory contains \"" PG_BACKUP_LABEL_FILE "\" file"); } @@ -555,7 +555,7 @@ catchup_sync_destination_files(const char* pgdata_path, fio_location location, p Assert(file->external_dir_num == 0); join_path_components(fullpath, pgdata_path, file->rel_path); - if (fio_sync(fullpath, 
location) != 0) + if (fio_sync(location, fullpath) != 0) elog(ERROR, "Cannot sync file \"%s\": %s", fullpath, strerror(errno)); } @@ -563,7 +563,7 @@ catchup_sync_destination_files(const char* pgdata_path, fio_location location, p * sync pg_control file */ join_path_components(fullpath, pgdata_path, pg_control_file->rel_path); - if (fio_sync(fullpath, location) != 0) + if (fio_sync(location, fullpath) != 0) elog(ERROR, "Cannot sync file \"%s\": %s", fullpath, strerror(errno)); time(&end_time); @@ -706,7 +706,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Start stream replication */ join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR); - fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST); + fio_mkdir(FIO_LOCAL_HOST, dest_xlog_path, DIR_PERMISSION); start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, current.start_lsn, current.tli, false); @@ -822,7 +822,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, join_path_components(dirpath, dest_pgdata, file->rel_path); elog(VERBOSE, "Create directory '%s'", dirpath); - fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST); + fio_mkdir(FIO_LOCAL_HOST, dirpath, DIR_PERMISSION); } else { @@ -835,7 +835,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char source_full_path[MAXPGPATH]; char symlink_content[MAXPGPATH]; join_path_components(source_full_path, source_pgdata, file->rel_path); - fio_readlink(source_full_path, symlink_content, sizeof(symlink_content), FIO_DB_HOST); + fio_readlink(FIO_DB_HOST, source_full_path, symlink_content, sizeof(symlink_content)); /* we checked that mapping exists in preflight_checks for local catchup */ linked_path = get_tablespace_mapping(symlink_content); elog(INFO, "Map tablespace full_path: \"%s\" old_symlink_content: \"%s\" new_symlink_content: \"%s\"\n", @@ -854,12 +854,12 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int 
num_threads, linked_path, to_path); /* create tablespace directory */ - if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0) + if (fio_mkdir(FIO_LOCAL_HOST, linked_path, file->mode) != 0) elog(ERROR, "Could not create tablespace directory \"%s\": %s", linked_path, strerror(errno)); /* create link to linked_path */ - if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0) + if (fio_symlink(FIO_LOCAL_HOST, linked_path, to_path, true) < 0) elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s", linked_path, to_path, strerror(errno)); } @@ -930,7 +930,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char fullpath[MAXPGPATH]; join_path_components(fullpath, dest_pgdata, file->rel_path); - if (fio_remove(fullpath, false, FIO_LOCAL_HOST) == 0) + if (fio_remove(FIO_LOCAL_HOST, fullpath, false) == 0) elog(VERBOSE, "Deleted file \"%s\"", fullpath); else elog(ERROR, "Cannot delete redundant file in destination \"%s\": %s", fullpath, strerror(errno)); diff --git a/src/configure.c b/src/configure.c index 9ffe2d7a7..3871aa8b9 100644 --- a/src/configure.c +++ b/src/configure.c @@ -2,7 +2,7 @@ * * configure.c: - manage backup catalog. 
* - * Copyright (c) 2017-2019, Postgres Professional + * Copyright (c) 2017-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -334,7 +334,7 @@ do_set_config(InstanceState *instanceState, bool missing_ok) if (fclose(fp)) elog(ERROR, "Cannot close configuration file: \"%s\"", path_temp); - if (fio_sync(path_temp, FIO_LOCAL_HOST) != 0) + if (fio_sync(FIO_LOCAL_HOST, path_temp) != 0) elog(ERROR, "Failed to sync temp configuration file \"%s\": %s", path_temp, strerror(errno)); @@ -602,7 +602,7 @@ readInstanceConfigFile(InstanceState *instanceState) init_config(instance, instanceState->instance_name); - if (fio_access(instanceState->instance_config_path, F_OK, FIO_BACKUP_HOST) != 0) + if (fio_access(FIO_BACKUP_HOST, instanceState->instance_config_path, F_OK) != 0) { elog(WARNING, "Control file \"%s\" doesn't exist", instanceState->instance_config_path); pfree(instance); diff --git a/src/data.c b/src/data.c index f02e3fd14..41615fb33 100644 --- a/src/data.c +++ b/src/data.c @@ -3,7 +3,7 @@ * data.c: utils to parse and backup data pages * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -800,7 +800,7 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, file->mtime <= parent_backup_time)) { - file->crc = fio_get_crc32(from_fullpath, FIO_DB_HOST, false); + file->crc = fio_get_crc32(FIO_DB_HOST, from_fullpath, false); /* ...and checksum is the same... 
*/ if (EQ_TRADITIONAL_CRC32(file->crc, prev_file->crc)) @@ -1325,7 +1325,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, if (already_exists) { /* compare checksums of already existing file and backup file */ - pg_crc32 file_crc = fio_get_crc32(to_fullpath, FIO_DB_HOST, false); + pg_crc32 file_crc = fio_get_crc32(FIO_DB_HOST, to_fullpath, false); if (file_crc == tmp_file->crc) { @@ -1522,14 +1522,14 @@ create_empty_file(fio_location from_location, const char *to_root, /* open file for write */ join_path_components(to_path, to_root, file->rel_path); - out = fio_fopen(to_path, PG_BINARY_W, to_location); + out = fio_fopen(to_location, to_path, PG_BINARY_W); if (out == NULL) elog(ERROR, "Cannot open destination file \"%s\": %s", to_path, strerror(errno)); /* update file permission */ - if (fio_chmod(to_path, file->mode, to_location) == -1) + if (fio_chmod(to_location, to_path, file->mode) == -1) elog(ERROR, "Cannot change mode of \"%s\": %s", to_path, strerror(errno)); @@ -2268,7 +2268,7 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, setvbuf(in, in_buf, _IOFBF, STDIO_BUFSIZE); } - out = fio_fopen(to_fullpath, PG_BINARY_R "+", FIO_BACKUP_HOST); + out = fio_fopen(FIO_BACKUP_HOST, to_fullpath, PG_BINARY_R "+"); if (out == NULL) elog(ERROR, "Cannot open destination file \"%s\": %s", to_fullpath, strerror(errno)); diff --git a/src/delete.c b/src/delete.c index 98e277ae9..867f6ba11 100644 --- a/src/delete.c +++ b/src/delete.c @@ -782,7 +782,7 @@ delete_backup_files(pgBackup *backup) elog(INFO, "Progress: (%zd/%zd). 
Delete file \"%s\"", i + 1, num_files, full_path); - if (fio_remove(full_path, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, full_path, false) != 0) elog(ERROR, "Cannot remove file or directory \"%s\": %s", full_path, strerror(errno)); } @@ -950,7 +950,7 @@ delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timeli } /* remove segment, missing file is not considered as error condition */ - if (fio_remove(wal_fullpath, true, FIO_BACKUP_HOST) < 0) + if (fio_remove(FIO_BACKUP_HOST, wal_fullpath, true) < 0) { elog(ERROR, "Could not remove file \"%s\": %s", wal_fullpath, strerror(errno)); diff --git a/src/dir.c b/src/dir.c index cb1f2d102..e68ae6a17 100644 --- a/src/dir.c +++ b/src/dir.c @@ -173,7 +173,7 @@ pgFileNew(const char *path, const char *rel_path, bool follow_symlink, pgFile *file; /* stat the file */ - if (fio_stat(path, &st, follow_symlink, location) < 0) + if (fio_stat(location, path, &st, follow_symlink) < 0) { /* file not found is not an error case */ if (errno == ENOENT) @@ -800,7 +800,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, elog(ERROR, "\"%s\" is not a directory", parent_dir); /* Open directory and list contents */ - dir = fio_opendir(parent_dir, location); + dir = fio_opendir(location, parent_dir); if (dir == NULL) { if (errno == ENOENT) @@ -1106,10 +1106,10 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba linked_path, to_path); /* create tablespace directory */ - fio_mkdir(linked_path, pg_tablespace_mode, location); + fio_mkdir(location, linked_path, pg_tablespace_mode); /* create link to linked_path */ - if (fio_symlink(linked_path, to_path, incremental, location) < 0) + if (fio_symlink(location, linked_path, to_path, incremental) < 0) elog(ERROR, "Could not create symbolic link \"%s\": %s", to_path, strerror(errno)); @@ -1124,7 +1124,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba 
join_path_components(to_path, data_dir, dir->rel_path); // TODO check exit code - fio_mkdir(to_path, dir->mode, location); + fio_mkdir(location, to_path, dir->mode); } if (extract_tablespaces) @@ -1149,7 +1149,7 @@ read_tablespace_map(parray *links, const char *backup_dir) join_path_components(db_path, backup_dir, DATABASE_DIR); join_path_components(map_path, db_path, PG_TABLESPACE_MAP_FILE); - fp = fio_open_stream(map_path, FIO_BACKUP_HOST); + fp = fio_open_stream(FIO_BACKUP_HOST, map_path); if (fp == NULL) elog(ERROR, "Cannot open tablespace map file \"%s\": %s", map_path, strerror(errno)); @@ -1587,7 +1587,7 @@ dir_is_empty(const char *path, fio_location location) DIR *dir; struct dirent *dir_ent; - dir = fio_opendir(path, location); + dir = fio_opendir(location, path); if (dir == NULL) { /* Directory in path doesn't exist */ @@ -1624,7 +1624,7 @@ fileExists(const char *path, fio_location location) { struct stat buf; - if (fio_stat(path, &buf, true, location) == -1 && errno == ENOENT) + if (fio_stat(location, path, &buf, true) == -1 && errno == ENOENT) return false; else if (!S_ISREG(buf.st_mode)) return false; @@ -1752,7 +1752,7 @@ write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_ join_path_components(database_dir, backup->root_dir, DATABASE_DIR); join_path_components(database_map_path, database_dir, DATABASE_MAP); - fp = fio_fopen(database_map_path, PG_BINARY_W, FIO_BACKUP_HOST); + fp = fio_fopen(FIO_BACKUP_HOST, database_map_path, PG_BINARY_W); if (fp == NULL) elog(ERROR, "Cannot open database map \"%s\": %s", database_map_path, strerror(errno)); @@ -1761,7 +1761,7 @@ write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_ if (fio_fflush(fp) || fio_fclose(fp)) { int save_errno = errno; - if (fio_remove(database_map_path, false, FIO_BACKUP_HOST) != 0) + if (fio_remove(FIO_BACKUP_HOST, database_map_path, false) != 0) elog(WARNING, "Cannot cleanup database map \"%s\": %s", database_map_path, 
strerror(errno)); elog(ERROR, "Cannot write database map \"%s\": %s", database_map_path, strerror(save_errno)); @@ -1792,7 +1792,7 @@ read_database_map(pgBackup *backup) join_path_components(path, backup->root_dir, DATABASE_DIR); join_path_components(database_map_path, path, DATABASE_MAP); - fp = fio_open_stream(database_map_path, FIO_BACKUP_HOST); + fp = fio_open_stream(FIO_BACKUP_HOST, database_map_path); if (fp == NULL) { /* It is NOT ok for database_map to be missing at this point, so @@ -1858,7 +1858,7 @@ cleanup_tablespace(const char *path) join_path_components(fullpath, path, file->rel_path); - if (fio_remove(fullpath, false, FIO_DB_HOST) == 0) + if (fio_remove(FIO_DB_HOST, fullpath, false) == 0) elog(VERBOSE, "Deleted file \"%s\"", fullpath); else elog(ERROR, "Cannot delete file or directory \"%s\": %s", fullpath, strerror(errno)); diff --git a/src/fetch.c b/src/fetch.c index bef30dac6..2609b3220 100644 --- a/src/fetch.c +++ b/src/fetch.c @@ -3,7 +3,7 @@ * fetch.c * Functions for fetching files from PostgreSQL data directory * - * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group * *------------------------------------------------------------------------- */ @@ -35,7 +35,7 @@ slurpFile(const char *datadir, const char *path, size_t *filesize, bool safe, fi join_path_components(fullpath, datadir, path); - if ((fd = fio_open(fullpath, O_RDONLY | PG_BINARY, location)) == -1) + if ((fd = fio_open(location, fullpath, O_RDONLY | PG_BINARY)) == -1) { if (safe) return NULL; @@ -44,7 +44,7 @@ slurpFile(const char *datadir, const char *path, size_t *filesize, bool safe, fi fullpath, strerror(errno)); } - if (fio_stat(fullpath, &statbuf, true, location) < 0) + if (fio_stat(location, fullpath, &statbuf, true) < 0) { if (safe) return NULL; diff --git a/src/merge.c b/src/merge.c index 57c4096cf..16acf49ea 100644 --- a/src/merge.c +++ b/src/merge.c @@ -714,7 +714,7 @@ 
merge_chain(InstanceState *instanceState, cleanup_header_map(&(full_backup->hdr_map)); /* sync new header map to disk */ - if (fio_sync(full_backup->hdr_map.path_tmp, FIO_BACKUP_HOST) != 0) + if (fio_sync(FIO_BACKUP_HOST, full_backup->hdr_map.path_tmp) != 0) elog(ERROR, "Cannot sync temp header map \"%s\": %s", full_backup->hdr_map.path_tmp, strerror(errno)); @@ -809,7 +809,7 @@ merge_chain(InstanceState *instanceState, /* We need full path, file object has relative path */ join_path_components(full_file_path, full_database_dir, full_file->rel_path); - if (fio_remove(full_file_path, false, FIO_BACKUP_HOST) == 0) + if (fio_remove(FIO_BACKUP_HOST, full_file_path, false) == 0) elog(VERBOSE, "Deleted \"%s\"", full_file_path); else elog(ERROR, "Cannot delete file or directory \"%s\": %s", full_file_path, strerror(errno)); @@ -1145,7 +1145,7 @@ remove_dir_with_files(const char *path) join_path_components(full_path, path, file->rel_path); - if (fio_remove(full_path, false, FIO_LOCAL_HOST) == 0) + if (fio_remove(FIO_LOCAL_HOST, full_path, false) == 0) elog(VERBOSE, "Deleted \"%s\"", full_path); else elog(ERROR, "Cannot delete file or directory \"%s\": %s", full_path, strerror(errno)); @@ -1284,7 +1284,7 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, return; /* sync second temp file to disk */ - if (!no_sync && fio_sync(to_fullpath_tmp2, FIO_BACKUP_HOST) != 0) + if (!no_sync && fio_sync(FIO_BACKUP_HOST, to_fullpath_tmp2) != 0) elog(ERROR, "Cannot sync merge temp file \"%s\": %s", to_fullpath_tmp2, strerror(errno)); @@ -1390,7 +1390,7 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup, to_fullpath_tmp, BACKUP_MODE_FULL, 0, false); /* sync temp file to disk */ - if (!no_sync && fio_sync(to_fullpath_tmp, FIO_BACKUP_HOST) != 0) + if (!no_sync && fio_sync(FIO_BACKUP_HOST, to_fullpath_tmp) != 0) elog(ERROR, "Cannot sync merge temp file \"%s\": %s", to_fullpath_tmp, strerror(errno)); diff --git a/src/parsexlog.c b/src/parsexlog.c index 
7f1ca9c75..df9b96fb3 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -5,7 +5,7 @@ * * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -1040,8 +1040,8 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, reader_data->thread_num, reader_data->xlogpath); reader_data->xlogexists = true; - reader_data->xlogfile = fio_open(reader_data->xlogpath, - O_RDONLY | PG_BINARY, FIO_LOCAL_HOST); + reader_data->xlogfile = fio_open(FIO_LOCAL_HOST, reader_data->xlogpath, + O_RDONLY | PG_BINARY); if (reader_data->xlogfile < 0) { @@ -1059,8 +1059,8 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, reader_data->thread_num, reader_data->gz_xlogpath); reader_data->xlogexists = true; - reader_data->gz_xlogfile = fio_gzopen(reader_data->gz_xlogpath, - "rb", -1, FIO_LOCAL_HOST); + reader_data->gz_xlogfile = fio_gzopen(FIO_LOCAL_HOST, reader_data->gz_xlogpath, + "rb", -1); if (reader_data->gz_xlogfile == NULL) { elog(WARNING, "Thread [%d]: Could not open compressed WAL segment \"%s\": %s", @@ -1946,4 +1946,4 @@ static XLogReaderState* WalReaderAllocate(uint32 wal_seg_size, XLogReaderData *r #else return XLogReaderAllocate(&SimpleXLogPageRead, reader_data); #endif -} \ No newline at end of file +} diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 49e226ace..d713babdd 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -35,7 +35,7 @@ * which includes info about pgdata directory and connection. 
* * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2021, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -516,8 +516,8 @@ main(int argc, char *argv[]) { struct stat st; - if (fio_stat(instanceState->instance_backup_subdir_path, - &st, true, FIO_BACKUP_HOST) != 0) + if (fio_stat(FIO_BACKUP_HOST, instanceState->instance_backup_subdir_path, + &st, true) != 0) { elog(WARNING, "Failed to access directory \"%s\": %s", instanceState->instance_backup_subdir_path, strerror(errno)); @@ -849,7 +849,7 @@ main(int argc, char *argv[]) */ char *stripped_wal_file_path = pgut_str_strip_trailing_filename(wal_file_path, wal_file_name); join_path_components(archive_push_xlog_dir, instance_config.pgdata, XLOGDIR); - if (fio_is_same_file(stripped_wal_file_path, archive_push_xlog_dir, true, FIO_DB_HOST)) + if (fio_is_same_file(FIO_DB_HOST, stripped_wal_file_path, archive_push_xlog_dir, true)) { /* 2nd case */ system_id = get_system_identifier(instance_config.pgdata, FIO_DB_HOST, false); diff --git a/src/restore.c b/src/restore.c index a31ffd983..dba9bc0b0 100644 --- a/src/restore.c +++ b/src/restore.c @@ -817,8 +817,8 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, elog(LOG, "Restore external directories"); for (i = 0; i < parray_num(external_dirs); i++) - fio_mkdir(parray_get(external_dirs, i), - DIR_PERMISSION, FIO_DB_HOST); + fio_mkdir(FIO_DB_HOST, parray_get(external_dirs, i), + DIR_PERMISSION); } /* @@ -844,7 +844,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, join_path_components(dirpath, external_path, file->rel_path); elog(VERBOSE, "Create external directory \"%s\"", dirpath); - fio_mkdir(dirpath, file->mode, FIO_DB_HOST); + fio_mkdir(FIO_DB_HOST, dirpath, file->mode); } } @@ -922,7 +922,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, 
join_path_components(fullpath, pgdata_path, file->rel_path); - if (fio_remove(fullpath, false, FIO_DB_HOST) == 0) + if (fio_remove(FIO_DB_HOST, fullpath, false) == 0) elog(VERBOSE, "Deleted file \"%s\"", fullpath); else elog(ERROR, "Cannot delete redundant file \"%s\": %s", fullpath, strerror(errno)); @@ -1066,7 +1066,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, } /* TODO: write test for case: file to be synced is missing */ - if (fio_sync(to_fullpath, FIO_DB_HOST) != 0) + if (fio_sync(FIO_DB_HOST, to_fullpath) != 0) elog(ERROR, "Failed to sync file \"%s\": %s", to_fullpath, strerror(errno)); } @@ -1223,20 +1223,20 @@ restore_files(void *arg) * if file do not exist */ if ((already_exists && dest_file->write_size == 0) || !already_exists) - out = fio_fopen(to_fullpath, PG_BINARY_W, FIO_DB_HOST); + out = fio_fopen(FIO_DB_HOST, to_fullpath, PG_BINARY_W); /* * If file already exists and dest size is not zero, * then open it for reading and writing. */ else - out = fio_fopen(to_fullpath, PG_BINARY_R "+", FIO_DB_HOST); + out = fio_fopen(FIO_DB_HOST, to_fullpath, PG_BINARY_R "+"); if (out == NULL) elog(ERROR, "Cannot open restore target file \"%s\": %s", to_fullpath, strerror(errno)); /* update file permission */ - if (fio_chmod(to_fullpath, dest_file->mode, FIO_DB_HOST) == -1) + if (fio_chmod(FIO_DB_HOST, to_fullpath, dest_file->mode) == -1) elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath, strerror(errno)); @@ -1485,12 +1485,12 @@ update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backu elog(LOG, "update recovery settings in recovery.conf"); join_path_components(path, instance_config.pgdata, "recovery.conf"); - fp = fio_fopen(path, "w", FIO_DB_HOST); + fp = fio_fopen(FIO_DB_HOST, path, "w"); if (fp == NULL) elog(ERROR, "cannot open file \"%s\": %s", path, strerror(errno)); - if (fio_chmod(path, FILE_PERMISSION, FIO_DB_HOST) == -1) + if (fio_chmod(FIO_DB_HOST, path, FILE_PERMISSION) == -1) elog(ERROR, "Cannot change 
mode of \"%s\": %s", path, strerror(errno)); fio_fprintf(fp, "# recovery.conf generated by pg_probackup %s\n", @@ -1542,7 +1542,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, join_path_components(postgres_auto_path, instance_config.pgdata, "postgresql.auto.conf"); - if (fio_stat(postgres_auto_path, &st, false, FIO_DB_HOST) < 0) + if (fio_stat(FIO_DB_HOST, postgres_auto_path, &st, false) < 0) { /* file not found is not an error case */ if (errno != ENOENT) @@ -1554,13 +1554,13 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, /* Kludge for 0-sized postgresql.auto.conf file. TODO: make something more intelligent */ if (st.st_size > 0) { - fp = fio_open_stream(postgres_auto_path, FIO_DB_HOST); + fp = fio_open_stream(FIO_DB_HOST, postgres_auto_path); if (fp == NULL) elog(ERROR, "cannot open \"%s\": %s", postgres_auto_path, strerror(errno)); } sprintf(postgres_auto_path_tmp, "%s.tmp", postgres_auto_path); - fp_tmp = fio_fopen(postgres_auto_path_tmp, "w", FIO_DB_HOST); + fp_tmp = fio_fopen(FIO_DB_HOST, postgres_auto_path_tmp, "w"); if (fp_tmp == NULL) elog(ERROR, "cannot open \"%s\": %s", postgres_auto_path_tmp, strerror(errno)); @@ -1609,16 +1609,16 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, strerror(errno)); pg_free(buf); - if (fio_rename(postgres_auto_path_tmp, postgres_auto_path, FIO_DB_HOST) < 0) + if (fio_rename(FIO_DB_HOST, postgres_auto_path_tmp, postgres_auto_path) < 0) elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", postgres_auto_path_tmp, postgres_auto_path, strerror(errno)); - if (fio_chmod(postgres_auto_path, FILE_PERMISSION, FIO_DB_HOST) == -1) + if (fio_chmod(FIO_DB_HOST, postgres_auto_path, FILE_PERMISSION) == -1) elog(ERROR, "Cannot change mode of \"%s\": %s", postgres_auto_path, strerror(errno)); if (params) { - fp = fio_fopen(postgres_auto_path, "a", FIO_DB_HOST); + fp = fio_fopen(FIO_DB_HOST, postgres_auto_path, "a"); if (fp == NULL) elog(ERROR, "cannot open 
file \"%s\": %s", postgres_auto_path, strerror(errno)); @@ -1652,7 +1652,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, elog(LOG, "creating recovery.signal file"); join_path_components(path, instance_config.pgdata, "recovery.signal"); - fp = fio_fopen(path, PG_BINARY_W, FIO_DB_HOST); + fp = fio_fopen(FIO_DB_HOST, path, PG_BINARY_W); if (fp == NULL) elog(ERROR, "cannot open file \"%s\": %s", path, strerror(errno)); @@ -1668,7 +1668,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, elog(LOG, "creating standby.signal file"); join_path_components(path, instance_config.pgdata, "standby.signal"); - fp = fio_fopen(path, PG_BINARY_W, FIO_DB_HOST); + fp = fio_fopen(FIO_DB_HOST, path, PG_BINARY_W); if (fp == NULL) elog(ERROR, "cannot open file \"%s\": %s", path, strerror(errno)); @@ -2208,7 +2208,7 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, if (incremental_mode == INCR_LSN) { join_path_components(backup_label, pgdata, "backup_label"); - if (fio_access(backup_label, F_OK, FIO_DB_HOST) == 0) + if (fio_access(FIO_DB_HOST, backup_label, F_OK) == 0) { elog(WARNING, "Destination directory contains \"backup_control\" file. " "This does NOT mean that you should delete this file and retry, only that " diff --git a/src/util.c b/src/util.c index fb33fd046..16cc01dad 100644 --- a/src/util.c +++ b/src/util.c @@ -3,7 +3,7 @@ * util.c: log messages to log file or stderr, and misc code. 
* * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -138,8 +138,8 @@ writeControlFile(ControlFileData *ControlFile, const char *path, fio_location lo memcpy(buffer, ControlFile, sizeof(ControlFileData)); /* Write pg_control */ - fd = fio_open(path, - O_RDWR | O_CREAT | O_TRUNC | PG_BINARY, location); + fd = fio_open(location, path, + O_RDWR | O_CREAT | O_TRUNC | PG_BINARY); if (fd < 0) elog(ERROR, "Failed to open file: %s", path); diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 8c5127a60..a7cf25133 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -3,7 +3,7 @@ * configuration.c: - function implementations to work with pg_probackup * configurations. * - * Copyright (c) 2017-2019, Postgres Professional + * Copyright (c) 2017-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -555,7 +555,7 @@ config_read_opt(const char *path, ConfigOption options[], int elevel, if (!options) return parsed_options; - if ((fp = fio_open_stream(path, FIO_BACKUP_HOST)) == NULL) + if ((fp = fio_open_stream(FIO_BACKUP_HOST, path)) == NULL) { if (missing_ok && errno == ENOENT) return parsed_options; diff --git a/src/utils/file.c b/src/utils/file.c index e97924caa..89804ce30 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -287,7 +287,7 @@ fio_get_agent_version(void) /* Open input stream. 
Remote file is fetched to the in-memory buffer and then accessed through Linux fmemopen */ FILE* -fio_open_stream(char const* path, fio_location location) +fio_open_stream(fio_location location, char const* path) { FILE* f; if (fio_is_remote(location)) @@ -340,7 +340,7 @@ fio_close_stream(FILE* f) /* Open directory */ DIR* -fio_opendir(char const* path, fio_location location) +fio_opendir(fio_location location, char const* path) { DIR* dir; if (fio_is_remote(location)) @@ -432,7 +432,7 @@ fio_closedir(DIR *dir) /* Open file */ int -fio_open(char const* path, int mode, fio_location location) +fio_open(fio_location location, char const* path, int mode) { int fd; if (fio_is_remote(location)) @@ -500,7 +500,7 @@ fio_disconnect(void) /* Open stdio file */ FILE* -fio_fopen(char const* path, char const* mode, fio_location location) +fio_fopen(fio_location location, char const* path, char const* mode) { FILE *f = NULL; @@ -531,7 +531,7 @@ fio_fopen(char const* path, char const* mode, fio_location location) } else { Assert(false); } - fd = fio_open(path, flags, location); + fd = fio_open(location, path, flags); if (fd >= 0) f = (FILE*)(size_t)((fd + 1) & ~FIO_PIPE_MARKER); } @@ -549,8 +549,8 @@ int fio_fprintf(FILE* f, char const* format, ...) { int rc; - va_list args; - va_start (args, format); + va_list args; + va_start (args, format); if (fio_is_remote_file(f)) { char buf[PRINTF_BUF_SIZE]; @@ -567,7 +567,7 @@ fio_fprintf(FILE* f, char const* format, ...) 
{ rc = vfprintf(f, format, args); } - va_end (args); + va_end (args); return rc; } @@ -1112,7 +1112,7 @@ fio_read(int fd, void* buf, size_t size) /* Get information about file */ int -fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_location location) +fio_stat(fio_location location, char const* path, struct stat* st, bool follow_symlink) { if (fio_is_remote(location)) { @@ -1149,15 +1149,15 @@ fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_location lo * in windows compare only filenames */ bool -fio_is_same_file(char const* filename1, char const* filename2, bool follow_symlink, fio_location location) +fio_is_same_file(fio_location location, char const* filename1, char const* filename2, bool follow_symlink) { #ifndef WIN32 struct stat stat1, stat2; - if (fio_stat(filename1, &stat1, follow_symlink, location) < 0) + if (fio_stat(location, filename1, &stat1, follow_symlink) < 0) elog(ERROR, "Can't stat file \"%s\": %s", filename1, strerror(errno)); - if (fio_stat(filename2, &stat2, follow_symlink, location) < 0) + if (fio_stat(location, filename2, &stat2, follow_symlink) < 0) elog(ERROR, "Can't stat file \"%s\": %s", filename2, strerror(errno)); return stat1.st_ino == stat2.st_ino && stat1.st_dev == stat2.st_dev; @@ -1179,7 +1179,7 @@ fio_is_same_file(char const* filename1, char const* filename2, bool follow_symli * returned value >= valsiz) */ ssize_t -fio_readlink(const char *path, char *value, size_t valsiz, fio_location location) +fio_readlink(fio_location location, const char *path, char *value, size_t valsiz) { if (!fio_is_remote(location)) { @@ -1213,7 +1213,7 @@ fio_readlink(const char *path, char *value, size_t valsiz, fio_location location /* Check presence of the file */ int -fio_access(char const* path, int mode, fio_location location) +fio_access(fio_location location, char const* path, int mode) { if (fio_is_remote(location)) { @@ -1245,7 +1245,7 @@ fio_access(char const* path, int mode, fio_location location) /* 
Create symbolic link */ int -fio_symlink(char const* target, char const* link_path, bool overwrite, fio_location location) +fio_symlink(fio_location location, char const* target, char const* link_path, bool overwrite) { if (fio_is_remote(location)) { @@ -1288,7 +1288,7 @@ fio_symlink_impl(int out, char *buf, bool overwrite) /* Rename file */ int -fio_rename(char const* old_path, char const* new_path, fio_location location) +fio_rename(fio_location location, char const* old_path, char const* new_path) { if (fio_is_remote(location)) { @@ -1315,7 +1315,7 @@ fio_rename(char const* old_path, char const* new_path, fio_location location) /* Sync file to disk */ int -fio_sync(char const* path, fio_location location) +fio_sync(fio_location location, char const* path) { if (fio_is_remote(location)) { @@ -1358,7 +1358,7 @@ fio_sync(char const* path, fio_location location) /* Get crc32 of file */ pg_crc32 -fio_get_crc32(const char *file_path, fio_location location, bool decompress) +fio_get_crc32(fio_location location, const char *file_path, bool decompress) { if (fio_is_remote(location)) { @@ -1393,7 +1393,7 @@ fio_get_crc32(const char *file_path, fio_location location, bool decompress) * if missing_ok, then ignore ENOENT error */ int -fio_remove(char const* path, bool missing_ok, fio_location location) +fio_remove(fio_location location, char const* path, bool missing_ok) { int result = 0; @@ -1453,7 +1453,7 @@ fio_remove_impl(char const* path, bool missing_ok, int out) * TODO: add strict flag */ int -fio_mkdir(char const* path, int mode, fio_location location) +fio_mkdir(fio_location location, char const* path, int mode) { if (fio_is_remote(location)) { @@ -1480,7 +1480,7 @@ fio_mkdir(char const* path, int mode, fio_location location) /* Change file mode */ int -fio_chmod(char const* path, int mode, fio_location location) +fio_chmod(fio_location location, char const* path, int mode) { if (fio_is_remote(location)) { @@ -1552,7 +1552,7 @@ fio_check_error_fd_gz(gzFile f, char 
**errmsg) /* On error returns NULL and errno should be checked */ gzFile -fio_gzopen(char const* path, char const* mode, int level, fio_location location) +fio_gzopen(fio_location location, char const* path, char const* mode, int level) { int rc; if (fio_is_remote(location)) @@ -1574,7 +1574,7 @@ fio_gzopen(char const* path, char const* mode, int level, fio_location location) if (rc == Z_OK) { gz->compress = 1; - gz->fd = fio_open(path, O_WRONLY | O_CREAT | O_EXCL | PG_BINARY, location); + gz->fd = fio_open(location, path, O_WRONLY | O_CREAT | O_EXCL | PG_BINARY); if (gz->fd < 0) { free(gz); @@ -1591,7 +1591,7 @@ fio_gzopen(char const* path, char const* mode, int level, fio_location location) if (rc == Z_OK) { gz->compress = 0; - gz->fd = fio_open(path, O_RDONLY | PG_BINARY, location); + gz->fd = fio_open(location, path, O_RDONLY | PG_BINARY); if (gz->fd < 0) { free(gz); @@ -2098,12 +2098,12 @@ fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, if (use_pagemap) IO_CHECK(fio_write_all(fio_stdout, (*file).pagemap.bitmap, (*file).pagemap.bitmapsize), (*file).pagemap.bitmapsize); - out = fio_fopen(to_fullpath, PG_BINARY_R "+", FIO_BACKUP_HOST); + out = fio_fopen(FIO_BACKUP_HOST, to_fullpath, PG_BINARY_R "+"); if (out == NULL) elog(ERROR, "Cannot open restore target file \"%s\": %s", to_fullpath, strerror(errno)); /* update file permission */ - if (fio_chmod(to_fullpath, file->mode, FIO_BACKUP_HOST) == -1) + if (fio_chmod(FIO_BACKUP_HOST, to_fullpath, file->mode) == -1) elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath, strerror(errno)); @@ -3237,11 +3237,11 @@ fio_communicate(int in, int out) pg_crc32 crc; #ifdef WIN32 - SYS_CHECK(setmode(in, _O_BINARY)); - SYS_CHECK(setmode(out, _O_BINARY)); + SYS_CHECK(setmode(in, _O_BINARY)); + SYS_CHECK(setmode(out, _O_BINARY)); #endif - /* Main loop until end of processing all master commands */ + /* Main loop until end of processing all master commands */ while ((rc = fio_read_all(in, &hdr, 
sizeof hdr)) == sizeof(hdr)) { if (hdr.size != 0) { if (hdr.size > buf_size) { diff --git a/src/utils/file.h b/src/utils/file.h index 3b06752c9..47202e29f 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -40,7 +40,7 @@ typedef enum /* used for incremental restore */ FIO_GET_CHECKSUM_MAP, FIO_GET_LSN_MAP, - /* used in fio_send_pages */ + /* used in fio_send_pages */ FIO_SEND_PAGES, FIO_ERROR, FIO_SEND_FILE, @@ -76,9 +76,13 @@ typedef struct { // fio_operations cop; // 16 + /* fio operation, see fio_operations enum */ unsigned cop : 32; + /* */ unsigned handle : 32; + /* size of additional data sent after this header */ unsigned size : 32; + /* additional small paramter for requests (varies between operations) or a result code for response */ unsigned arg; } fio_header; @@ -89,15 +93,17 @@ extern fio_location MyLocation; extern void fio_redirect(int in, int out, int err); extern void fio_communicate(int in, int out); +extern void fio_disconnect(void); extern int fio_get_agent_version(void); -extern FILE* fio_fopen(char const* name, char const* mode, fio_location location); +extern void fio_error(int rc, int size, char const* file, int line); + +/* FILE-style functions */ +extern FILE* fio_fopen(fio_location location, char const* name, char const* mode); extern size_t fio_fwrite(FILE* f, void const* buf, size_t size); extern ssize_t fio_fwrite_async_compressed(FILE* f, void const* buf, size_t size, int compress_alg); extern size_t fio_fwrite_async(FILE* f, void const* buf, size_t size); extern int fio_check_error_file(FILE* f, char **errmsg); -extern int fio_check_error_fd(int fd, char **errmsg); -extern int fio_check_error_fd_gz(gzFile f, char **errmsg); extern ssize_t fio_fread(FILE* f, void* buf, size_t size); extern int fio_pread(FILE* f, void* buf, off_t offs); extern int fio_fprintf(FILE* f, char const* arg, ...) 
pg_attribute_printf(2, 3); @@ -106,38 +112,45 @@ extern int fio_fseek(FILE* f, off_t offs); extern int fio_ftruncate(FILE* f, off_t size); extern int fio_fclose(FILE* f); extern int fio_ffstat(FILE* f, struct stat* st); -extern void fio_error(int rc, int size, char const* file, int line); -extern int fio_open(char const* name, int mode, fio_location location); +extern FILE* fio_open_stream(fio_location location, char const* name); +extern int fio_close_stream(FILE* f); + +/* fd-style functions */ +extern int fio_open(fio_location location, char const* name, int mode); extern ssize_t fio_write(int fd, void const* buf, size_t size); extern ssize_t fio_write_async(int fd, void const* buf, size_t size); +extern int fio_check_error_fd(int fd, char **errmsg); +extern int fio_check_error_fd_gz(gzFile f, char **errmsg); extern ssize_t fio_read(int fd, void* buf, size_t size); extern int fio_flush(int fd); extern int fio_seek(int fd, off_t offs); extern int fio_fstat(int fd, struct stat* st); extern int fio_truncate(int fd, off_t size); extern int fio_close(int fd); -extern void fio_disconnect(void); -extern int fio_sync(char const* path, fio_location location); -extern pg_crc32 fio_get_crc32(const char *file_path, fio_location location, bool decompress); - -extern int fio_rename(char const* old_path, char const* new_path, fio_location location); -extern int fio_symlink(char const* target, char const* link_path, bool overwrite, fio_location location); -extern int fio_remove(char const* path, bool missing_ok, fio_location location); -extern int fio_mkdir(char const* path, int mode, fio_location location); -extern int fio_chmod(char const* path, int mode, fio_location location); -extern int fio_access(char const* path, int mode, fio_location location); -extern int fio_stat(char const* path, struct stat* st, bool follow_symlinks, fio_location location); -extern bool fio_is_same_file(char const* filename1, char const* filename2, bool follow_symlink, fio_location location); 
-extern ssize_t fio_readlink(const char *path, char *value, size_t valsiz, fio_location location); -extern DIR* fio_opendir(char const* path, fio_location location); + +/* DIR-style functions */ +extern DIR* fio_opendir(fio_location location, char const* path); extern struct dirent * fio_readdir(DIR *dirp); extern int fio_closedir(DIR *dirp); -extern FILE* fio_open_stream(char const* name, fio_location location); -extern int fio_close_stream(FILE* f); +/* pathname-style functions */ +extern int fio_sync(fio_location location, char const* path); +extern pg_crc32 fio_get_crc32(fio_location location, const char *file_path, bool decompress); + +extern int fio_rename(fio_location location, char const* old_path, char const* new_path); +extern int fio_symlink(fio_location location, char const* target, char const* link_path, bool overwrite); +extern int fio_remove(fio_location location, char const* path, bool missing_ok); +extern int fio_mkdir(fio_location location, char const* path, int mode); +extern int fio_chmod(fio_location location, char const* path, int mode); +extern int fio_access(fio_location location, char const* path, int mode); +extern int fio_stat(fio_location location, char const* path, struct stat* st, bool follow_symlinks); +extern bool fio_is_same_file(fio_location location, char const* filename1, char const* filename2, bool follow_symlink); +extern ssize_t fio_readlink(fio_location location, const char *path, char *value, size_t valsiz); + +/* gzFile-style functions */ #ifdef HAVE_LIBZ -extern gzFile fio_gzopen(char const* path, char const* mode, int level, fio_location location); +extern gzFile fio_gzopen(fio_location location, char const* path, char const* mode, int level); extern int fio_gzclose(gzFile file); extern int fio_gzread(gzFile f, void *buf, unsigned size); extern int fio_gzwrite(gzFile f, void const* buf, unsigned size); From 4ec64a9b4efb2a07fa7e8f0e3cf8e2c7c753c45b Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 7 Jan 2022 04:32:14 +0300 Subject: [PATCH 004/339] [refactoring] 's/char const/const char/g' I prefer the declaration in form 'char const', but in the source code the form 'const char' is used more often --- src/utils/configuration.c | 6 +++--- src/utils/configuration.h | 4 ++-- src/utils/file.c | 40 +++++++++++++++++++-------------------- src/utils/file.h | 32 +++++++++++++++---------------- src/utils/remote.c | 4 ++-- 5 files changed, 43 insertions(+), 43 deletions(-) diff --git a/src/utils/configuration.c b/src/utils/configuration.c index a7cf25133..fa3f444ce 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -88,7 +88,7 @@ static const unit_conversion time_unit_conversion_table[] = }; /* Order is important, keep it in sync with utils/configuration.h:enum ProbackupSubcmd declaration */ -static char const * const subcmd_names[] = +static const char * const subcmd_names[] = { "NO_CMD", "init", @@ -114,7 +114,7 @@ static char const * const subcmd_names[] = }; ProbackupSubcmd -parse_subcmd(char const * const subcmd_str) +parse_subcmd(const char * const subcmd_str) { struct { ProbackupSubcmd id; @@ -137,7 +137,7 @@ parse_subcmd(char const * const subcmd_str) return NO_CMD; } -char const * +const char * get_subcmd_name(ProbackupSubcmd const subcmd) { Assert((int)subcmd < sizeof(subcmd_names) / sizeof(subcmd_names[0])); diff --git a/src/utils/configuration.h b/src/utils/configuration.h index 2c6ea3eec..e42544466 100644 --- a/src/utils/configuration.h +++ b/src/utils/configuration.h @@ -101,8 +101,8 @@ struct ConfigOption #define OPTION_UNIT (OPTION_UNIT_MEMORY | OPTION_UNIT_TIME) -extern ProbackupSubcmd parse_subcmd(char const * const subcmd_str); -extern char const *get_subcmd_name(ProbackupSubcmd const subcmd); +extern ProbackupSubcmd parse_subcmd(const char * const subcmd_str); +extern const char *get_subcmd_name(ProbackupSubcmd const subcmd); extern int config_get_opt(int argc, char **argv, ConfigOption cmd_options[], 
ConfigOption options[]); extern int config_read_opt(const char *path, ConfigOption options[], int elevel, diff --git a/src/utils/file.c b/src/utils/file.c index 89804ce30..9f5178908 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -112,7 +112,7 @@ fio_redirect(int in, int out, int err) } void -fio_error(int rc, int size, char const* file, int line) +fio_error(int rc, int size, const char* file, int line) { if (remote_agent) { @@ -192,7 +192,7 @@ pread(int fd, void* buf, size_t size, off_t off) #ifdef WIN32 static int -remove_file_or_dir(char const* path) +remove_file_or_dir(const char* path) { int rc = remove(path); @@ -287,7 +287,7 @@ fio_get_agent_version(void) /* Open input stream. Remote file is fetched to the in-memory buffer and then accessed through Linux fmemopen */ FILE* -fio_open_stream(fio_location location, char const* path) +fio_open_stream(fio_location location, const char* path) { FILE* f; if (fio_is_remote(location)) @@ -340,7 +340,7 @@ fio_close_stream(FILE* f) /* Open directory */ DIR* -fio_opendir(fio_location location, char const* path) +fio_opendir(fio_location location, const char* path) { DIR* dir; if (fio_is_remote(location)) @@ -432,7 +432,7 @@ fio_closedir(DIR *dir) /* Open file */ int -fio_open(fio_location location, char const* path, int mode) +fio_open(fio_location location, const char* path, int mode) { int fd; if (fio_is_remote(location)) @@ -500,7 +500,7 @@ fio_disconnect(void) /* Open stdio file */ FILE* -fio_fopen(fio_location location, char const* path, char const* mode) +fio_fopen(fio_location location, const char* path, const char* mode) { FILE *f = NULL; @@ -546,7 +546,7 @@ fio_fopen(fio_location location, char const* path, char const* mode) /* Format output to file stream */ int -fio_fprintf(FILE* f, char const* format, ...) +fio_fprintf(FILE* f, const char* format, ...) 
{ int rc; va_list args; @@ -1112,7 +1112,7 @@ fio_read(int fd, void* buf, size_t size) /* Get information about file */ int -fio_stat(fio_location location, char const* path, struct stat* st, bool follow_symlink) +fio_stat(fio_location location, const char* path, struct stat* st, bool follow_symlink) { if (fio_is_remote(location)) { @@ -1149,7 +1149,7 @@ fio_stat(fio_location location, char const* path, struct stat* st, bool follow_s * in windows compare only filenames */ bool -fio_is_same_file(fio_location location, char const* filename1, char const* filename2, bool follow_symlink) +fio_is_same_file(fio_location location, const char* filename1, const char* filename2, bool follow_symlink) { #ifndef WIN32 struct stat stat1, stat2; @@ -1213,7 +1213,7 @@ fio_readlink(fio_location location, const char *path, char *value, size_t valsiz /* Check presence of the file */ int -fio_access(fio_location location, char const* path, int mode) +fio_access(fio_location location, const char* path, int mode) { if (fio_is_remote(location)) { @@ -1245,7 +1245,7 @@ fio_access(fio_location location, char const* path, int mode) /* Create symbolic link */ int -fio_symlink(fio_location location, char const* target, char const* link_path, bool overwrite) +fio_symlink(fio_location location, const char* target, const char* link_path, bool overwrite) { if (fio_is_remote(location)) { @@ -1288,7 +1288,7 @@ fio_symlink_impl(int out, char *buf, bool overwrite) /* Rename file */ int -fio_rename(fio_location location, char const* old_path, char const* new_path) +fio_rename(fio_location location, const char* old_path, const char* new_path) { if (fio_is_remote(location)) { @@ -1315,7 +1315,7 @@ fio_rename(fio_location location, char const* old_path, char const* new_path) /* Sync file to disk */ int -fio_sync(fio_location location, char const* path) +fio_sync(fio_location location, const char* path) { if (fio_is_remote(location)) { @@ -1393,7 +1393,7 @@ fio_get_crc32(fio_location location, const char 
*file_path, bool decompress) * if missing_ok, then ignore ENOENT error */ int -fio_remove(fio_location location, char const* path, bool missing_ok) +fio_remove(fio_location location, const char* path, bool missing_ok) { int result = 0; @@ -1431,7 +1431,7 @@ fio_remove(fio_location location, char const* path, bool missing_ok) static void -fio_remove_impl(char const* path, bool missing_ok, int out) +fio_remove_impl(const char* path, bool missing_ok, int out) { fio_header hdr = { .cop = FIO_REMOVE, @@ -1453,7 +1453,7 @@ fio_remove_impl(char const* path, bool missing_ok, int out) * TODO: add strict flag */ int -fio_mkdir(fio_location location, char const* path, int mode) +fio_mkdir(fio_location location, const char* path, int mode) { if (fio_is_remote(location)) { @@ -1480,7 +1480,7 @@ fio_mkdir(fio_location location, char const* path, int mode) /* Change file mode */ int -fio_chmod(fio_location location, char const* path, int mode) +fio_chmod(fio_location location, const char* path, int mode) { if (fio_is_remote(location)) { @@ -1552,7 +1552,7 @@ fio_check_error_fd_gz(gzFile f, char **errmsg) /* On error returns NULL and errno should be checked */ gzFile -fio_gzopen(fio_location location, char const* path, char const* mode, int level) +fio_gzopen(fio_location location, const char* path, const char* mode, int level) { int rc; if (fio_is_remote(location)) @@ -1819,7 +1819,7 @@ fio_gzseek(gzFile f, z_off_t offset, int whence) * Note: it should not be used for large files. 
*/ static void -fio_load_file(int out, char const* path) +fio_load_file(int out, const char* path) { int fd = open(path, O_RDONLY); fio_header hdr; @@ -2728,7 +2728,7 @@ fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, * */ static void -fio_send_file_impl(int out, char const* path) +fio_send_file_impl(int out, const char* path) { FILE *fp; fio_header hdr; diff --git a/src/utils/file.h b/src/utils/file.h index 47202e29f..0adbb68ae 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -96,28 +96,28 @@ extern void fio_communicate(int in, int out); extern void fio_disconnect(void); extern int fio_get_agent_version(void); -extern void fio_error(int rc, int size, char const* file, int line); +extern void fio_error(int rc, int size, const char* file, int line); /* FILE-style functions */ -extern FILE* fio_fopen(fio_location location, char const* name, char const* mode); +extern FILE* fio_fopen(fio_location location, const char* name, const char* mode); extern size_t fio_fwrite(FILE* f, void const* buf, size_t size); extern ssize_t fio_fwrite_async_compressed(FILE* f, void const* buf, size_t size, int compress_alg); extern size_t fio_fwrite_async(FILE* f, void const* buf, size_t size); extern int fio_check_error_file(FILE* f, char **errmsg); extern ssize_t fio_fread(FILE* f, void* buf, size_t size); extern int fio_pread(FILE* f, void* buf, off_t offs); -extern int fio_fprintf(FILE* f, char const* arg, ...) pg_attribute_printf(2, 3); +extern int fio_fprintf(FILE* f, const char* arg, ...) 
pg_attribute_printf(2, 3); extern int fio_fflush(FILE* f); extern int fio_fseek(FILE* f, off_t offs); extern int fio_ftruncate(FILE* f, off_t size); extern int fio_fclose(FILE* f); extern int fio_ffstat(FILE* f, struct stat* st); -extern FILE* fio_open_stream(fio_location location, char const* name); +extern FILE* fio_open_stream(fio_location location, const char* name); extern int fio_close_stream(FILE* f); /* fd-style functions */ -extern int fio_open(fio_location location, char const* name, int mode); +extern int fio_open(fio_location location, const char* name, int mode); extern ssize_t fio_write(int fd, void const* buf, size_t size); extern ssize_t fio_write_async(int fd, void const* buf, size_t size); extern int fio_check_error_fd(int fd, char **errmsg); @@ -130,27 +130,27 @@ extern int fio_truncate(int fd, off_t size); extern int fio_close(int fd); /* DIR-style functions */ -extern DIR* fio_opendir(fio_location location, char const* path); +extern DIR* fio_opendir(fio_location location, const char* path); extern struct dirent * fio_readdir(DIR *dirp); extern int fio_closedir(DIR *dirp); /* pathname-style functions */ -extern int fio_sync(fio_location location, char const* path); +extern int fio_sync(fio_location location, const char* path); extern pg_crc32 fio_get_crc32(fio_location location, const char *file_path, bool decompress); -extern int fio_rename(fio_location location, char const* old_path, char const* new_path); -extern int fio_symlink(fio_location location, char const* target, char const* link_path, bool overwrite); -extern int fio_remove(fio_location location, char const* path, bool missing_ok); -extern int fio_mkdir(fio_location location, char const* path, int mode); -extern int fio_chmod(fio_location location, char const* path, int mode); -extern int fio_access(fio_location location, char const* path, int mode); -extern int fio_stat(fio_location location, char const* path, struct stat* st, bool follow_symlinks); -extern bool 
fio_is_same_file(fio_location location, char const* filename1, char const* filename2, bool follow_symlink); +extern int fio_rename(fio_location location, const char* old_path, const char* new_path); +extern int fio_symlink(fio_location location, const char* target, const char* link_path, bool overwrite); +extern int fio_remove(fio_location location, const char* path, bool missing_ok); +extern int fio_mkdir(fio_location location, const char* path, int mode); +extern int fio_chmod(fio_location location, const char* path, int mode); +extern int fio_access(fio_location location, const char* path, int mode); +extern int fio_stat(fio_location location, const char* path, struct stat* st, bool follow_symlinks); +extern bool fio_is_same_file(fio_location location, const char* filename1, const char* filename2, bool follow_symlink); extern ssize_t fio_readlink(fio_location location, const char *path, char *value, size_t valsiz); /* gzFile-style functions */ #ifdef HAVE_LIBZ -extern gzFile fio_gzopen(fio_location location, char const* path, char const* mode, int level); +extern gzFile fio_gzopen(fio_location location, const char* path, const char* mode, int level); extern int fio_gzclose(gzFile file); extern int fio_gzread(gzFile f, void *buf, unsigned size); extern int fio_gzwrite(gzFile f, void const* buf, unsigned size); diff --git a/src/utils/remote.c b/src/utils/remote.c index 2bfd24d1e..ec0110db4 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -103,7 +103,7 @@ void launch_ssh(char* argv[]) } #endif -static bool needs_quotes(char const* path) +static bool needs_quotes(const char* path) { return strchr(path, ' ') != NULL; } @@ -156,7 +156,7 @@ bool launch_agent(void) if (instance_config.remote.path) { - char const* probackup = PROGRAM_NAME_FULL; + const char* probackup = PROGRAM_NAME_FULL; char* sep = strrchr(probackup, '/'); if (sep != NULL) { probackup = sep + 1; From 1fe300ca2236a4b08d96098a2104efadb93b32c9 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 7 Jan 2022 06:41:37 +0300 Subject: [PATCH 005/339] [ci skip] small fix: typo and indent --- src/catalog.c | 2 +- src/utils/file.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index af7e3b6d5..b0cae0f4f 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -667,7 +667,7 @@ grab_shared_lock_file(pgBackup *backup) else if (errno != ESRCH) elog(ERROR, "Failed to send signal 0 to a process %d: %s", encoded_pid, strerror(errno)); - } + } if (fp_in) { diff --git a/src/utils/file.h b/src/utils/file.h index 0adbb68ae..b0aa0c4ad 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -82,7 +82,7 @@ typedef struct unsigned handle : 32; /* size of additional data sent after this header */ unsigned size : 32; - /* additional small paramter for requests (varies between operations) or a result code for response */ + /* additional small parameter for requests (varies between operations) or a result code for response */ unsigned arg; } fio_header; From ed75ef64014c7d6684a391c3aa999e2edfded25c Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 7 Jan 2022 06:45:49 +0300 Subject: [PATCH 006/339] return errno from remote fio_rename() invocation --- src/utils/file.c | 39 ++++++++++++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 9f5178908..326122b3e 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1292,19 +1292,27 @@ fio_rename(fio_location location, const char* old_path, const char* new_path) { if (fio_is_remote(location)) { - fio_header hdr; size_t old_path_len = strlen(old_path) + 1; size_t new_path_len = strlen(new_path) + 1; - hdr.cop = FIO_RENAME; - hdr.handle = -1; - hdr.size = old_path_len + new_path_len; + fio_header hdr = { + .cop = FIO_RENAME, + .handle = -1, + .size = old_path_len + new_path_len, + .arg = 0, + }; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, old_path, old_path_len), old_path_len); IO_CHECK(fio_write_all(fio_stdout, new_path, new_path_len), new_path_len); - //TODO: wait for confirmation. 
+ IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + Assert(hdr.cop == FIO_RENAME); + if (hdr.arg != 0) + { + errno = hdr.arg; + return -1; + } return 0; } else @@ -1313,6 +1321,22 @@ fio_rename(fio_location location, const char* old_path, const char* new_path) } } +static void +fio_rename_impl(char const* old_path, const char* new_path, int out) +{ + fio_header hdr = { + .cop = FIO_RENAME, + .handle = -1, + .size = 0, + .arg = 0, + }; + + if (rename(old_path, new_path) != 0) + hdr.arg = errno; + + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); +} + /* Sync file to disk */ int fio_sync(fio_location location, const char* path) @@ -3337,7 +3361,8 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); break; case FIO_RENAME: /* Rename file */ - SYS_CHECK(rename(buf, buf + strlen(buf) + 1)); + /* possible buffer overflow */ + fio_rename_impl(buf, buf + strlen(buf) + 1, out); break; case FIO_SYMLINK: /* Create symbolic link */ fio_symlink_impl(out, buf, hdr.arg > 0 ? true : false); @@ -3363,7 +3388,7 @@ fio_communicate(int in, int out) fio_list_dir_impl(out, buf); break; case FIO_SEND_PAGES: - // buf contain fio_send_request header and bitmap. + /* buf contain fio_send_request header and bitmap. */ fio_send_pages_impl(out, buf); break; case FIO_SEND_FILE: From 4d4551a6e7bd9f77811613240d2621f2a06d4a66 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 7 Jan 2022 07:05:09 +0300 Subject: [PATCH 007/339] fio_check_postmaster() refactoring --- src/catchup.c | 2 +- src/pg_probackup.h | 1 - src/restore.c | 2 +- src/utils/file.c | 31 ++++++++++++++++++------------- src/utils/file.h | 1 + 5 files changed, 21 insertions(+), 16 deletions(-) diff --git a/src/catchup.c b/src/catchup.c index f100964ef..79e6039a0 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -133,7 +133,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, if (current.backup_mode != BACKUP_MODE_FULL) { pid_t pid; - pid = fio_check_postmaster(dest_pgdata, FIO_LOCAL_HOST); + pid = fio_check_postmaster(FIO_LOCAL_HOST, dest_pgdata); if (pid == 1) /* postmaster.pid is mangled */ { char pid_filename[MAXPGPATH]; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 269f41adc..42629531e 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1236,7 +1236,6 @@ extern PageState *fio_get_checksum_map(const char *fullpath, uint32 checksum_ver extern datapagemap_t *fio_get_lsn_map(const char *fullpath, uint32 checksum_version, int n_blocks, XLogRecPtr horizonLsn, BlockNumber segmentno, fio_location location); -extern pid_t fio_check_postmaster(const char *pgdata, fio_location location); extern int32 fio_decompress(void* dst, void const* src, size_t size, int compress_alg, char **errormsg); diff --git a/src/restore.c b/src/restore.c index dba9bc0b0..3b087ffef 100644 --- a/src/restore.c +++ b/src/restore.c @@ -2160,7 +2160,7 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, char backup_label[MAXPGPATH]; /* check postmaster pid */ - pid = fio_check_postmaster(pgdata, FIO_DB_HOST); + pid = fio_check_postmaster(FIO_DB_HOST, pgdata); if (pid == 1) /* postmaster.pid is mangled */ { diff --git a/src/utils/file.c b/src/utils/file.c index 326122b3e..63bb6ff1f 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3206,20 +3206,24 @@ local_check_postmaster(const char *pgdata) * and check 
that process is running, if process is running, return its pid number. */ pid_t -fio_check_postmaster(const char *pgdata, fio_location location) +fio_check_postmaster(fio_location location, const char *pgdata) { if (fio_is_remote(location)) { - fio_header hdr; - - hdr.cop = FIO_CHECK_POSTMASTER; - hdr.size = strlen(pgdata) + 1; + fio_header hdr = { + .cop = FIO_CHECK_POSTMASTER, + .handle = -1, + .size = strlen(pgdata) + 1, + .arg = 0, + }; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, pgdata, hdr.size), hdr.size); /* receive result */ IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + Assert(hdr.cop == FIO_CHECK_POSTMASTER); + return hdr.arg; } else @@ -3227,16 +3231,18 @@ fio_check_postmaster(const char *pgdata, fio_location location) } static void -fio_check_postmaster_impl(int out, char *buf) +fio_check_postmaster_impl(const char *pgdata, int out) { - fio_header hdr; - pid_t postmaster_pid; - char *pgdata = (char*) buf; + fio_header hdr = { + .cop = FIO_CHECK_POSTMASTER, + .handle = -1, + .size = 0, + .arg = 0, + }; - postmaster_pid = local_check_postmaster(pgdata); + hdr.arg = local_check_postmaster(pgdata); /* send arrays of checksums to main process */ - hdr.arg = postmaster_pid; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } @@ -3427,8 +3433,7 @@ fio_communicate(int in, int out) fio_get_lsn_map_impl(out, buf); break; case FIO_CHECK_POSTMASTER: - /* calculate crc32 for a file */ - fio_check_postmaster_impl(out, buf); + fio_check_postmaster_impl(buf, out); break; case FIO_DISCONNECT: hdr.cop = FIO_DISCONNECTED; diff --git a/src/utils/file.h b/src/utils/file.h index b0aa0c4ad..0eae1b741 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -147,6 +147,7 @@ extern int fio_access(fio_location location, const char* path, int mode); extern int fio_stat(fio_location location, const char* path, struct stat* st, bool follow_symlinks); extern bool 
fio_is_same_file(fio_location location, const char* filename1, const char* filename2, bool follow_symlink); extern ssize_t fio_readlink(fio_location location, const char *path, char *value, size_t valsiz); +extern pid_t fio_check_postmaster(fio_location location, const char *pgdata); /* gzFile-style functions */ #ifdef HAVE_LIBZ From 6d7f9251a139805c59c0627605bb51a706d2a391 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 7 Jan 2022 07:54:24 +0300 Subject: [PATCH 008/339] [refactoring] move some fio_ function declaration from pg_probackup.h to utils/file.h --- src/pg_probackup.h | 18 --------- src/restore.c | 10 +++-- src/utils/file.c | 22 +++++------ src/utils/file.h | 96 +++++++++++++++++++++++++++------------------- 4 files changed, 72 insertions(+), 74 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 42629531e..ea5d930bc 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1209,7 +1209,6 @@ extern int copy_pages(const char *to_fullpath, const char *from_fullpath, BackupMode backup_mode); /* FIO */ -extern void setMyLocation(ProbackupSubcmd const subcmd); extern int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, bool use_pagemap, BlockNumber *err_blknum, char **errormsg, @@ -1217,28 +1216,15 @@ extern int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pg extern int fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, bool use_pagemap, BlockNumber *err_blknum, char **errormsg); -/* return codes for fio_send_pages */ extern int fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg); extern int fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, pgFile *file, char **errormsg); -extern void fio_list_dir(parray *files, const char *root, 
bool exclude, bool follow_symlink, - bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num); - extern bool pgut_rmtree(const char *path, bool rmtopdir, bool strict); extern void pgut_setenv(const char *key, const char *val); extern void pgut_unsetenv(const char *key); -extern PageState *fio_get_checksum_map(const char *fullpath, uint32 checksum_version, int n_blocks, - XLogRecPtr dest_stop_lsn, BlockNumber segmentno, fio_location location); - -extern datapagemap_t *fio_get_lsn_map(const char *fullpath, uint32 checksum_version, - int n_blocks, XLogRecPtr horizonLsn, BlockNumber segmentno, - fio_location location); - -extern int32 fio_decompress(void* dst, void const* src, size_t size, int compress_alg, char **errormsg); - /* return codes for fio_send_pages() and fio_send_file() */ #define SEND_OK (0) #define FILE_MISSING (-1) @@ -1249,10 +1235,6 @@ extern int32 fio_decompress(void* dst, void const* src, size_t size, int compres #define REMOTE_ERROR (-6) #define PAGE_CORRUPTION (-8) -/* Check if specified location is local for current node */ -extern bool fio_is_remote(fio_location location); -extern bool fio_is_remote_simple(fio_location location); - extern void get_header_errormsg(Page page, char **errormsg); extern void get_checksum_errormsg(Page page, char **errormsg, BlockNumber absolute_blkno); diff --git a/src/restore.c b/src/restore.c index 3b087ffef..0fd9fa44c 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1205,15 +1205,17 @@ restore_files(void *arg) { if (arguments->incremental_mode == INCR_LSN) { - lsn_map = fio_get_lsn_map(to_fullpath, arguments->dest_backup->checksum_version, + lsn_map = fio_get_lsn_map(FIO_DB_HOST, to_fullpath, + arguments->dest_backup->checksum_version, dest_file->n_blocks, arguments->shift_lsn, - dest_file->segno * RELSEG_SIZE, FIO_DB_HOST); + dest_file->segno * RELSEG_SIZE); } else if (arguments->incremental_mode == INCR_CHECKSUM) { - checksum_map = fio_get_checksum_map(to_fullpath, 
arguments->dest_backup->checksum_version, + checksum_map = fio_get_checksum_map(FIO_DB_HOST, to_fullpath, + arguments->dest_backup->checksum_version, dest_file->n_blocks, arguments->dest_backup->stop_lsn, - dest_file->segno * RELSEG_SIZE, FIO_DB_HOST); + dest_file->segno * RELSEG_SIZE); } } diff --git a/src/utils/file.c b/src/utils/file.c index 63bb6ff1f..167960699 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -909,7 +909,7 @@ fio_write_async_impl(int fd, void const* buf, size_t size, int out) } } -int32 +static int32 fio_decompress(void* dst, void const* src, size_t size, int compress_alg, char **errormsg) { const char *internal_errormsg = NULL; @@ -3016,8 +3016,8 @@ fio_list_dir(parray *files, const char *root, bool exclude, } PageState * -fio_get_checksum_map(const char *fullpath, uint32 checksum_version, int n_blocks, - XLogRecPtr dest_stop_lsn, BlockNumber segmentno, fio_location location) +fio_get_checksum_map(fio_location location, const char *fullpath, uint32 checksum_version, + int n_blocks, XLogRecPtr dest_stop_lsn, BlockNumber segmentno) { if (fio_is_remote(location)) { @@ -3059,7 +3059,7 @@ fio_get_checksum_map(const char *fullpath, uint32 checksum_version, int n_blocks } static void -fio_get_checksum_map_impl(int out, char *buf) +fio_get_checksum_map_impl(char *buf, int out) { fio_header hdr; PageState *checksum_map = NULL; @@ -3079,9 +3079,9 @@ fio_get_checksum_map_impl(int out, char *buf) } datapagemap_t * -fio_get_lsn_map(const char *fullpath, uint32 checksum_version, - int n_blocks, XLogRecPtr shift_lsn, BlockNumber segmentno, - fio_location location) +fio_get_lsn_map(fio_location location, const char *fullpath, + uint32 checksum_version, int n_blocks, + XLogRecPtr shift_lsn, BlockNumber segmentno) { datapagemap_t* lsn_map = NULL; @@ -3127,7 +3127,7 @@ fio_get_lsn_map(const char *fullpath, uint32 checksum_version, } static void -fio_get_lsn_map_impl(int out, char *buf) +fio_get_lsn_map_impl(char *buf, int out) { fio_header hdr; 
datapagemap_t *lsn_map = NULL; @@ -3425,12 +3425,10 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &crc, sizeof(crc)), sizeof(crc)); break; case FIO_GET_CHECKSUM_MAP: - /* calculate crc32 for a file */ - fio_get_checksum_map_impl(out, buf); + fio_get_checksum_map_impl(buf, out); break; case FIO_GET_LSN_MAP: - /* calculate crc32 for a file */ - fio_get_lsn_map_impl(out, buf); + fio_get_lsn_map_impl(buf, out); break; case FIO_CHECK_POSTMASTER: fio_check_postmaster_impl(buf, out); diff --git a/src/utils/file.h b/src/utils/file.h index 0eae1b741..b5f168b54 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -58,20 +58,6 @@ typedef enum FIO_READLINK } fio_operations; -typedef enum -{ - FIO_LOCAL_HOST, /* data is locate at local host */ - FIO_DB_HOST, /* data is located at Postgres server host */ - FIO_BACKUP_HOST, /* data is located at backup host */ - FIO_REMOTE_HOST /* date is located at remote host */ -} fio_location; - -#define FIO_FDMAX 64 -#define FIO_PIPE_MARKER 0x40000000 - -#define SYS_CHECK(cmd) do if ((cmd) < 0) { fprintf(stderr, "%s:%d: (%s) %s\n", __FILE__, __LINE__, #cmd, strerror(errno)); exit(EXIT_FAILURE); } while (0) -#define IO_CHECK(cmd, size) do { int _rc = (cmd); if (_rc != (size)) fio_error(_rc, size, __FILE__, __LINE__); } while (0) - typedef struct { // fio_operations cop; @@ -86,18 +72,51 @@ typedef struct unsigned arg; } fio_header; +typedef enum +{ + FIO_LOCAL_HOST, /* data is locate at local host */ + FIO_DB_HOST, /* data is located at Postgres server host */ + FIO_BACKUP_HOST, /* data is located at backup host */ + FIO_REMOTE_HOST /* date is located at remote host */ +} fio_location; + extern fio_location MyLocation; -/* Check if FILE handle is local or remote (created by FIO) */ -#define fio_is_remote_file(file) ((size_t)(file) <= FIO_FDMAX) +extern void setMyLocation(ProbackupSubcmd const subcmd); +/* Check if specified location is local for current node */ +extern bool fio_is_remote(fio_location location); +extern 
bool fio_is_remote_simple(fio_location location); -extern void fio_redirect(int in, int out, int err); extern void fio_communicate(int in, int out); extern void fio_disconnect(void); - extern int fio_get_agent_version(void); + +#define FIO_FDMAX 64 +#define FIO_PIPE_MARKER 0x40000000 + +/* Check if FILE handle is local or remote (created by FIO) */ +#define fio_is_remote_file(file) ((size_t)(file) <= FIO_FDMAX) + +extern void fio_redirect(int in, int out, int err); extern void fio_error(int rc, int size, const char* file, int line); +#define SYS_CHECK(cmd) do if ((cmd) < 0) { fprintf(stderr, "%s:%d: (%s) %s\n", __FILE__, __LINE__, #cmd, strerror(errno)); exit(EXIT_FAILURE); } while (0) +#define IO_CHECK(cmd, size) do { int _rc = (cmd); if (_rc != (size)) fio_error(_rc, size, __FILE__, __LINE__); } while (0) + + +/* fd-style functions */ +extern int fio_open(fio_location location, const char* name, int mode); +extern ssize_t fio_write(int fd, void const* buf, size_t size); +extern ssize_t fio_write_async(int fd, void const* buf, size_t size); +extern int fio_check_error_fd(int fd, char **errmsg); +extern int fio_check_error_fd_gz(gzFile f, char **errmsg); +extern ssize_t fio_read(int fd, void* buf, size_t size); +extern int fio_flush(int fd); +extern int fio_seek(int fd, off_t offs); +extern int fio_fstat(int fd, struct stat* st); +extern int fio_truncate(int fd, off_t size); +extern int fio_close(int fd); + /* FILE-style functions */ extern FILE* fio_fopen(fio_location location, const char* name, const char* mode); extern size_t fio_fwrite(FILE* f, void const* buf, size_t size); @@ -116,18 +135,16 @@ extern int fio_ffstat(FILE* f, struct stat* st); extern FILE* fio_open_stream(fio_location location, const char* name); extern int fio_close_stream(FILE* f); -/* fd-style functions */ -extern int fio_open(fio_location location, const char* name, int mode); -extern ssize_t fio_write(int fd, void const* buf, size_t size); -extern ssize_t fio_write_async(int fd, void 
const* buf, size_t size); -extern int fio_check_error_fd(int fd, char **errmsg); -extern int fio_check_error_fd_gz(gzFile f, char **errmsg); -extern ssize_t fio_read(int fd, void* buf, size_t size); -extern int fio_flush(int fd); -extern int fio_seek(int fd, off_t offs); -extern int fio_fstat(int fd, struct stat* st); -extern int fio_truncate(int fd, off_t size); -extern int fio_close(int fd); +/* gzFile-style functions */ +#ifdef HAVE_LIBZ +extern gzFile fio_gzopen(fio_location location, const char* path, const char* mode, int level); +extern int fio_gzclose(gzFile file); +extern int fio_gzread(gzFile f, void *buf, unsigned size); +extern int fio_gzwrite(gzFile f, void const* buf, unsigned size); +extern int fio_gzeof(gzFile f); +extern z_off_t fio_gzseek(gzFile f, z_off_t offset, int whence); +extern const char* fio_gzerror(gzFile file, int *errnum); +#endif /* DIR-style functions */ extern DIR* fio_opendir(fio_location location, const char* path); @@ -149,15 +166,14 @@ extern bool fio_is_same_file(fio_location location, const char* filename1, co extern ssize_t fio_readlink(fio_location location, const char *path, char *value, size_t valsiz); extern pid_t fio_check_postmaster(fio_location location, const char *pgdata); -/* gzFile-style functions */ -#ifdef HAVE_LIBZ -extern gzFile fio_gzopen(fio_location location, const char* path, const char* mode, int level); -extern int fio_gzclose(gzFile file); -extern int fio_gzread(gzFile f, void *buf, unsigned size); -extern int fio_gzwrite(gzFile f, void const* buf, unsigned size); -extern int fio_gzeof(gzFile f); -extern z_off_t fio_gzseek(gzFile f, z_off_t offset, int whence); -extern const char* fio_gzerror(gzFile file, int *errnum); -#endif +extern void fio_list_dir(parray *files, const char *root, bool exclude, bool follow_symlink, + bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num); + +struct PageState; /* defined in pg_probackup.h */ +extern struct PageState *fio_get_checksum_map(fio_location 
location, const char *fullpath, uint32 checksum_version, + int n_blocks, XLogRecPtr dest_stop_lsn, BlockNumber segmentno); +struct datapagemap; /* defined in datapagemap.h */ +extern struct datapagemap *fio_get_lsn_map(fio_location location, const char *fullpath, uint32 checksum_version, + int n_blocks, XLogRecPtr horizonLsn, BlockNumber segmentno); #endif From d6d64ddc23144ec47e41d2199bbf485388675600 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 7 Jan 2022 12:34:30 +0300 Subject: [PATCH 009/339] [refactoring] move fio_location argument in first place (part 2) --- src/backup.c | 5 +++-- src/catchup.c | 17 ++++++++-------- src/data.c | 4 ++-- src/fetch.c | 2 +- src/init.c | 2 +- src/pg_probackup.c | 6 +++--- src/pg_probackup.h | 18 ++++++++-------- src/restore.c | 2 +- src/util.c | 51 ++++++++++++++++++++++++---------------------- 9 files changed, 56 insertions(+), 51 deletions(-) diff --git a/src/backup.c b/src/backup.c index 2a5d0a30f..19120e7d7 100644 --- a/src/backup.c +++ b/src/backup.c @@ -136,7 +136,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, #if PG_VERSION_NUM >= 90600 current.tli = get_current_timeline(backup_conn); #else - current.tli = get_current_timeline_from_control(instance_config.pgdata, FIO_DB_HOST, false); + /* PG-9.5 */ + current.tli = get_current_timeline_from_control(FIO_DB_HOST, instance_config.pgdata, false); #endif /* @@ -943,7 +944,7 @@ check_system_identifiers(PGconn *conn, const char *pgdata) uint64 system_id_conn; uint64 system_id_pgdata; - system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST, false); + system_id_pgdata = get_system_identifier(FIO_DB_HOST, pgdata, false); system_id_conn = get_remote_system_identifier(conn); /* for checkdb check only system_id_pgdata and system_id_conn */ diff --git a/src/catchup.c b/src/catchup.c index 79e6039a0..4a2cc5c46 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -48,7 +48,7 @@ catchup_init_state(PGNodeInfo *source_node_info, const char 
*source_pgdata, cons /* Get WAL segments size and system ID of source PG instance */ instance_config.xlog_seg_size = get_xlog_seg_size(source_pgdata); - instance_config.system_identifier = get_system_identifier(source_pgdata, FIO_DB_HOST, false); + instance_config.system_identifier = get_system_identifier(FIO_DB_HOST, source_pgdata, false); current.start_time = time(NULL); strlcpy(current.program_version, PROGRAM_VERSION, sizeof(current.program_version)); @@ -69,8 +69,9 @@ catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, cons #if PG_VERSION_NUM >= 90600 current.tli = get_current_timeline(source_conn); #else + /* PG-9.5 */ instance_config.pgdata = source_pgdata; - current.tli = get_current_timeline_from_control(source_pgdata, FIO_DB_HOST, false); + current.tli = get_current_timeline_from_control(FIO_DB_HOST, source_pgdata, false); #endif elog(INFO, "Catchup start, pg_probackup version: %s, " @@ -163,7 +164,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, uint64 source_conn_id, source_id, dest_id; source_conn_id = get_remote_system_identifier(source_conn); - source_id = get_system_identifier(source_pgdata, FIO_DB_HOST, false); /* same as instance_config.system_identifier */ + source_id = get_system_identifier(FIO_DB_HOST, source_pgdata, false); /* same as instance_config.system_identifier */ if (source_conn_id != source_id) elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", @@ -171,7 +172,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, if (current.backup_mode != BACKUP_MODE_FULL) { - dest_id = get_system_identifier(dest_pgdata, FIO_LOCAL_HOST, false); + dest_id = get_system_identifier(FIO_LOCAL_HOST, dest_pgdata, false); if (source_conn_id != dest_id) elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", source_conn_id, dest_pgdata, dest_id); @@ -202,7 +203,7 @@ 
catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, RedoParams dest_redo = { 0, InvalidXLogRecPtr, 0 }; /* fill dest_redo.lsn and dest_redo.tli */ - get_redo(dest_pgdata, FIO_LOCAL_HOST, &dest_redo); + get_redo(FIO_LOCAL_HOST, dest_pgdata, &dest_redo); elog(VERBOSE, "source.tli = %X, dest_redo.lsn = %X/%X, dest_redo.tli = %X", current.tli, (uint32) (dest_redo.lsn >> 32), (uint32) dest_redo.lsn, dest_redo.tli); @@ -656,7 +657,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, filter_filelist(dest_filelist, dest_pgdata, exclude_absolute_paths_list, exclude_relative_paths_list, "Destination"); // fill dest_redo.lsn and dest_redo.tli - get_redo(dest_pgdata, FIO_LOCAL_HOST, &dest_redo); + get_redo(FIO_LOCAL_HOST, dest_pgdata, &dest_redo); elog(INFO, "syncLSN = %X/%X", (uint32) (dest_redo.lsn >> 32), (uint32) dest_redo.lsn); /* @@ -969,8 +970,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char to_fullpath[MAXPGPATH]; join_path_components(from_fullpath, source_pgdata, source_pg_control_file->rel_path); join_path_components(to_fullpath, dest_pgdata, source_pg_control_file->rel_path); - copy_pgcontrol_file(from_fullpath, FIO_DB_HOST, - to_fullpath, FIO_LOCAL_HOST, source_pg_control_file); + copy_pgcontrol_file(FIO_DB_HOST, from_fullpath, + FIO_LOCAL_HOST, to_fullpath, source_pg_control_file); transfered_datafiles_bytes += source_pg_control_file->size; } diff --git a/src/data.c b/src/data.c index 41615fb33..9b95a0308 100644 --- a/src/data.c +++ b/src/data.c @@ -787,8 +787,8 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, /* special treatment for global/pg_control */ if (file->external_dir_num == 0 && strcmp(file->rel_path, XLOG_CONTROL_FILE) == 0) { - copy_pgcontrol_file(from_fullpath, FIO_DB_HOST, - to_fullpath, FIO_BACKUP_HOST, file); + copy_pgcontrol_file(FIO_DB_HOST, from_fullpath, + FIO_BACKUP_HOST, to_fullpath, file); return; } diff --git a/src/fetch.c b/src/fetch.c 
index 2609b3220..bbea7bffe 100644 --- a/src/fetch.c +++ b/src/fetch.c @@ -25,7 +25,7 @@ * */ char * -slurpFile(const char *datadir, const char *path, size_t *filesize, bool safe, fio_location location) +slurpFile(fio_location location, const char *datadir, const char *path, size_t *filesize, bool safe) { int fd; char *buffer; diff --git a/src/init.c b/src/init.c index 8773016b5..055c6d7ef 100644 --- a/src/init.c +++ b/src/init.c @@ -57,7 +57,7 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) "(-D, --pgdata)"); /* Read system_identifier from PGDATA */ - instance->system_identifier = get_system_identifier(instance->pgdata, FIO_DB_HOST, false); + instance->system_identifier = get_system_identifier(FIO_DB_HOST, instance->pgdata, false); /* Starting from PostgreSQL 11 read WAL segment size from PGDATA */ instance->xlog_seg_size = get_xlog_seg_size(instance->pgdata); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index d713babdd..d819e1fa7 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -833,7 +833,7 @@ main(int argc, char *argv[]) if (wal_file_path == NULL) { /* 1st case */ - system_id = get_system_identifier(current_dir, FIO_DB_HOST, false); + system_id = get_system_identifier(FIO_DB_HOST, current_dir, false); join_path_components(archive_push_xlog_dir, current_dir, XLOGDIR); } else @@ -852,7 +852,7 @@ main(int argc, char *argv[]) if (fio_is_same_file(FIO_DB_HOST, stripped_wal_file_path, archive_push_xlog_dir, true)) { /* 2nd case */ - system_id = get_system_identifier(instance_config.pgdata, FIO_DB_HOST, false); + system_id = get_system_identifier(FIO_DB_HOST, instance_config.pgdata, false); /* archive_push_xlog_dir already have right value */ } else @@ -862,7 +862,7 @@ main(int argc, char *argv[]) else elog(ERROR, "Value specified to --wal_file_path is too long"); - system_id = get_system_identifier(current_dir, FIO_DB_HOST, true); + system_id = get_system_identifier(FIO_DB_HOST, current_dir, true); /* 3rd case if control 
file present -- i.e. system_id != 0 */ if (system_id == 0) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index ea5d930bc..4b93098b2 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -914,11 +914,11 @@ extern void do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, const char *status); /* in fetch.c */ -extern char *slurpFile(const char *datadir, +extern char *slurpFile(fio_location location, + const char *datadir, const char *path, size_t *filesize, - bool safe, - fio_location location); + bool safe); extern char *fetchFile(PGconn *conn, const char *filename, size_t *filesize); /* in help.c */ @@ -1148,19 +1148,19 @@ extern XLogRecPtr get_next_record_lsn(const char *archivedir, XLogSegNo segno, T /* in util.c */ extern TimeLineID get_current_timeline(PGconn *conn); -extern TimeLineID get_current_timeline_from_control(const char *pgdata_path, fio_location location, bool safe); +extern TimeLineID get_current_timeline_from_control(fio_location location, const char *pgdata_path, bool safe); extern XLogRecPtr get_checkpoint_location(PGconn *conn); -extern uint64 get_system_identifier(const char *pgdata_path, fio_location location, bool safe); +extern uint64 get_system_identifier(fio_location location, const char *pgdata_path, bool safe); extern uint64 get_remote_system_identifier(PGconn *conn); extern uint32 get_data_checksum_version(bool safe); extern pg_crc32c get_pgcontrol_checksum(const char *pgdata_path); -extern DBState get_system_dbstate(const char *pgdata_path, fio_location location); +extern DBState get_system_dbstate(fio_location location, const char *pgdata_path); extern uint32 get_xlog_seg_size(const char *pgdata_path); -extern void get_redo(const char *pgdata_path, fio_location pgdata_location, RedoParams *redo); +extern void get_redo(fio_location location, const char *pgdata_path, RedoParams *redo); extern void set_min_recovery_point(pgFile *file, const char *backup_path, XLogRecPtr stop_backup_lsn); -extern 
void copy_pgcontrol_file(const char *from_fullpath, fio_location from_location, - const char *to_fullpath, fio_location to_location, pgFile *file); +extern void copy_pgcontrol_file(fio_location from_location, const char *from_fullpath, + fio_location to_location, const char *to_fullpath, pgFile *file); extern void time2iso(char *buf, size_t len, time_t time, bool utc); extern const char *status2str(BackupStatus status); diff --git a/src/restore.c b/src/restore.c index 0fd9fa44c..8ecef045a 100644 --- a/src/restore.c +++ b/src/restore.c @@ -485,7 +485,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg { RedoParams redo; parray *timelines = NULL; - get_redo(instance_config.pgdata, FIO_DB_HOST, &redo); + get_redo(FIO_DB_HOST, instance_config.pgdata, &redo); if (redo.checksum_version == 0) elog(ERROR, "Incremental restore in 'lsn' mode require " diff --git a/src/util.c b/src/util.c index 16cc01dad..e89f5776b 100644 --- a/src/util.c +++ b/src/util.c @@ -122,7 +122,7 @@ digestControlFile(ControlFileData *ControlFile, char *src, size_t size) * Write ControlFile to pg_control */ static void -writeControlFile(ControlFileData *ControlFile, const char *path, fio_location location) +writeControlFile(fio_location location, const char *path, ControlFileData *ControlFile) { int fd; char *buffer = NULL; @@ -172,7 +172,7 @@ get_current_timeline(PGconn *conn) if (PQresultStatus(res) == PGRES_TUPLES_OK) val = PQgetvalue(res, 0, 0); else - return get_current_timeline_from_control(instance_config.pgdata, FIO_DB_HOST, false); + return get_current_timeline_from_control(FIO_DB_HOST, instance_config.pgdata, false); if (!parse_uint32(val, &tli, 0)) { @@ -180,7 +180,7 @@ get_current_timeline(PGconn *conn) elog(WARNING, "Invalid value of timeline_id %s", val); /* TODO 3.0 remove it and just error out */ - return get_current_timeline_from_control(instance_config.pgdata, FIO_DB_HOST, false); + return get_current_timeline_from_control(FIO_DB_HOST, 
instance_config.pgdata, false); } return tli; @@ -188,15 +188,15 @@ get_current_timeline(PGconn *conn) /* Get timeline from pg_control file */ TimeLineID -get_current_timeline_from_control(const char *pgdata_path, fio_location location, bool safe) +get_current_timeline_from_control(fio_location location, const char *pgdata_path, bool safe) { ControlFileData ControlFile; char *buffer; size_t size; /* First fetch file... */ - buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, - safe, location); + buffer = slurpFile(location, pgdata_path, XLOG_CONTROL_FILE, + &size, safe); if (safe && buffer == NULL) return 0; @@ -234,11 +234,12 @@ get_checkpoint_location(PGconn *conn) return lsn; #else + /* PG-9.5 */ char *buffer; size_t size; ControlFileData ControlFile; - buffer = slurpFile(instance_config.pgdata, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST); + buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, &size, false); digestControlFile(&ControlFile, buffer, size); pg_free(buffer); @@ -247,14 +248,14 @@ get_checkpoint_location(PGconn *conn) } uint64 -get_system_identifier(const char *pgdata_path, fio_location location, bool safe) +get_system_identifier(fio_location location, const char *pgdata_path, bool safe) { ControlFileData ControlFile; char *buffer; size_t size; /* First fetch file... 
*/ - buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, safe, location); + buffer = slurpFile(location, pgdata_path, XLOG_CONTROL_FILE, &size, safe); if (safe && buffer == NULL) return 0; digestControlFile(&ControlFile, buffer, size); @@ -284,11 +285,12 @@ get_remote_system_identifier(PGconn *conn) return system_id_conn; #else + /* PG-9.5 */ char *buffer; size_t size; ControlFileData ControlFile; - buffer = slurpFile(instance_config.pgdata, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST); + buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, &size, false); digestControlFile(&ControlFile, buffer, size); pg_free(buffer); @@ -305,7 +307,7 @@ get_xlog_seg_size(const char *pgdata_path) size_t size; /* First fetch file... */ - buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST); + buffer = slurpFile(FIO_DB_HOST, pgdata_path, XLOG_CONTROL_FILE, &size, false); digestControlFile(&ControlFile, buffer, size); pg_free(buffer); @@ -323,8 +325,8 @@ get_data_checksum_version(bool safe) size_t size; /* First fetch file... */ - buffer = slurpFile(instance_config.pgdata, XLOG_CONTROL_FILE, &size, - safe, FIO_DB_HOST); + buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, + &size, safe); if (buffer == NULL) return 0; digestControlFile(&ControlFile, buffer, size); @@ -341,7 +343,7 @@ get_pgcontrol_checksum(const char *pgdata_path) size_t size; /* First fetch file... 
*/ - buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, FIO_BACKUP_HOST); + buffer = slurpFile(FIO_BACKUP_HOST, pgdata_path, XLOG_CONTROL_FILE, &size, false); digestControlFile(&ControlFile, buffer, size); pg_free(buffer); @@ -349,14 +351,15 @@ get_pgcontrol_checksum(const char *pgdata_path) return ControlFile.crc; } +/* unused function */ DBState -get_system_dbstate(const char *pgdata_path, fio_location location) +get_system_dbstate(fio_location location, const char *pgdata_path) { ControlFileData ControlFile; char *buffer; size_t size; - buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, location); + buffer = slurpFile(location, pgdata_path, XLOG_CONTROL_FILE, &size, false); if (buffer == NULL) return 0; digestControlFile(&ControlFile, buffer, size); @@ -366,14 +369,14 @@ get_system_dbstate(const char *pgdata_path, fio_location location) } void -get_redo(const char *pgdata_path, fio_location pgdata_location, RedoParams *redo) +get_redo(fio_location location, const char *pgdata_path, RedoParams *redo) { ControlFileData ControlFile; char *buffer; size_t size; /* First fetch file... 
*/ - buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, pgdata_location); + buffer = slurpFile(location, pgdata_path, XLOG_CONTROL_FILE, &size, false); digestControlFile(&ControlFile, buffer, size); pg_free(buffer); @@ -412,7 +415,7 @@ set_min_recovery_point(pgFile *file, const char *backup_path, char fullpath[MAXPGPATH]; /* First fetch file content */ - buffer = slurpFile(instance_config.pgdata, XLOG_CONTROL_FILE, &size, false, FIO_DB_HOST); + buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, &size, false); digestControlFile(&ControlFile, buffer, size); elog(LOG, "Current minRecPoint %X/%X", @@ -433,7 +436,7 @@ set_min_recovery_point(pgFile *file, const char *backup_path, /* overwrite pg_control */ join_path_components(fullpath, backup_path, XLOG_CONTROL_FILE); - writeControlFile(&ControlFile, fullpath, FIO_LOCAL_HOST); + writeControlFile(FIO_LOCAL_HOST, fullpath, &ControlFile); /* Update pg_control checksum in backup_list */ file->crc = ControlFile.crc; @@ -445,14 +448,14 @@ set_min_recovery_point(pgFile *file, const char *backup_path, * Copy pg_control file to backup. We do not apply compression to this file. 
*/ void -copy_pgcontrol_file(const char *from_fullpath, fio_location from_location, - const char *to_fullpath, fio_location to_location, pgFile *file) +copy_pgcontrol_file(fio_location from_location, const char *from_fullpath, + fio_location to_location, const char *to_fullpath, pgFile *file) { ControlFileData ControlFile; char *buffer; size_t size; - buffer = slurpFile(from_fullpath, "", &size, false, from_location); + buffer = slurpFile(from_location, from_fullpath, "", &size, false); digestControlFile(&ControlFile, buffer, size); @@ -461,7 +464,7 @@ copy_pgcontrol_file(const char *from_fullpath, fio_location from_location, file->write_size = size; file->uncompressed_size = size; - writeControlFile(&ControlFile, to_fullpath, to_location); + writeControlFile(to_location, to_fullpath, &ControlFile); pg_free(buffer); } From a2b7f3b36bd362482efd0fe8bbfb71c8482187c2 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 7 Jan 2022 13:14:33 +0300 Subject: [PATCH 010/339] followup for 4d7714e --- src/restore.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/restore.c b/src/restore.c index 8ecef045a..d1c50769c 100644 --- a/src/restore.c +++ b/src/restore.c @@ -2194,7 +2194,7 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, */ elog(INFO, "Trying to read pg_control file in destination directory"); - system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST, false); + system_id_pgdata = get_system_identifier(FIO_DB_HOST, pgdata, false); if (system_id_pgdata == instance_config.system_identifier) system_id_match = true; From 9e9e86f7b542afd456adc9a1918a33736b86e380 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 7 Jan 2022 13:17:36 +0300 Subject: [PATCH 011/339] [refactoring] unification for some fio_ functions --- src/utils/file.c | 97 ++++++++++++++++++++++++++---------------------- 1 file changed, 52 insertions(+), 45 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 167960699..af76e0f12 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -603,17 +603,19 @@ fio_close(int fd) { if (fio_is_remote_fd(fd)) { - fio_header hdr; + fio_header hdr = { + .cop = FIO_CLOSE, + .handle = fd & ~FIO_PIPE_MARKER, + .size = 0, + .arg = 0, + }; - hdr.cop = FIO_CLOSE; - hdr.handle = fd & ~FIO_PIPE_MARKER; - hdr.size = 0; fio_fdset &= ~(1 << hdr.handle); - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); /* Wait for response */ IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + Assert(hdr.arg == FIO_CLOSE); if (hdr.arg != 0) { @@ -633,10 +635,12 @@ fio_close(int fd) static void fio_close_impl(int fd, int out) { - fio_header hdr; - - hdr.cop = FIO_CLOSE; - hdr.arg = 0; + fio_header hdr = { + .cop = FIO_CLOSE, + .handle = -1, + .size = 0, + .arg = 0, + }; if (close(fd) != 0) hdr.arg = errno; @@ -662,12 +666,12 @@ fio_truncate(int fd, off_t size) { if (fio_is_remote_fd(fd)) { - fio_header hdr; - - hdr.cop = FIO_TRUNCATE; - hdr.handle = fd & ~FIO_PIPE_MARKER; - hdr.size = 0; - hdr.arg = size; + fio_header hdr = { + .cop = FIO_TRUNCATE, + .handle = fd & ~FIO_PIPE_MARKER, + .size = 0, + .arg = size, + }; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); @@ -815,17 +819,19 @@ fio_write(int fd, void const* buf, size_t size) { if (fio_is_remote_fd(fd)) { - fio_header hdr; - - hdr.cop = FIO_WRITE; - hdr.handle = fd & ~FIO_PIPE_MARKER; - hdr.size = size; + fio_header hdr = { + .cop = FIO_WRITE, + .handle = fd & ~FIO_PIPE_MARKER, + .size = size, + .arg = 0, + }; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, buf, size), size); /* check results */ 
IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + Assert(hdr.arg == FIO_WRITE); /* set errno */ if (hdr.arg > 0) @@ -845,14 +851,16 @@ fio_write(int fd, void const* buf, size_t size) static void fio_write_impl(int fd, void const* buf, size_t size, int out) { + fio_header hdr = { + .cop = FIO_WRITE, + .handle = -1, + .size = 0, + .arg = 0, + }; int rc; - fio_header hdr; rc = durable_write(fd, buf, size); - hdr.arg = 0; - hdr.size = 0; - if (rc < 0) hdr.arg = errno; @@ -880,19 +888,19 @@ fio_write_async(int fd, void const* buf, size_t size) if (fio_is_remote_fd(fd)) { - fio_header hdr; - - hdr.cop = FIO_WRITE_ASYNC; - hdr.handle = fd & ~FIO_PIPE_MARKER; - hdr.size = size; + fio_header hdr = { + .cop = FIO_WRITE_ASYNC, + .handle = fd & ~FIO_PIPE_MARKER, + .size = size, + .arg = 0, + }; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, buf, size), size); + return size; } else return durable_write(fd, buf, size); - - return size; } static void @@ -1089,12 +1097,12 @@ fio_read(int fd, void* buf, size_t size) { if (fio_is_remote_fd(fd)) { - fio_header hdr; - - hdr.cop = FIO_READ; - hdr.handle = fd & ~FIO_PIPE_MARKER; - hdr.size = 0; - hdr.arg = size; + fio_header hdr = { + .cop = FIO_READ, + .handle = fd & ~FIO_PIPE_MARKER, + .size = 0, + .arg = size, + }; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); @@ -1116,16 +1124,15 @@ fio_stat(fio_location location, const char* path, struct stat* st, bool follow_s { if (fio_is_remote(location)) { - fio_header hdr; - size_t path_len = strlen(path) + 1; - - hdr.cop = FIO_STAT; - hdr.handle = -1; - hdr.arg = follow_symlink; - hdr.size = path_len; + fio_header hdr = { + .cop = FIO_STAT, + .handle = -1, + .size = strlen(path) + 1, + .arg = follow_symlink, + }; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, path, path_len), path_len); + IO_CHECK(fio_write_all(fio_stdout, path, 
hdr.size), hdr.size); IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); Assert(hdr.cop == FIO_STAT); From efedbc27814207175a8770cc7464911a5b348736 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 7 Jan 2022 20:49:22 +0300 Subject: [PATCH 012/339] [refactoring] remove dir_create_dir() calls and use fio_mkdir() instead --- src/backup.c | 4 +- src/catalog.c | 6 +-- src/catchup.c | 6 +-- src/dir.c | 34 +---------------- src/init.c | 10 ++--- src/merge.c | 2 +- src/pg_probackup.h | 1 - src/restore.c | 4 +- src/utils/file.c | 94 ++++++++++++++++++++++++++++++++++------------ src/utils/file.h | 2 +- 10 files changed, 89 insertions(+), 74 deletions(-) diff --git a/src/backup.c b/src/backup.c index 19120e7d7..84b503245 100644 --- a/src/backup.c +++ b/src/backup.c @@ -261,7 +261,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, char stream_xlog_path[MAXPGPATH]; join_path_components(stream_xlog_path, current.database_dir, PG_XLOG_DIR); - fio_mkdir(FIO_BACKUP_HOST, stream_xlog_path, DIR_PERMISSION); + fio_mkdir(FIO_BACKUP_HOST, stream_xlog_path, DIR_PERMISSION, false); start_WAL_streaming(backup_conn, stream_xlog_path, &instance_config.conn_opt, current.start_lsn, current.tli, true); @@ -414,7 +414,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, join_path_components(dirpath, current.database_dir, file->rel_path); elog(VERBOSE, "Create directory '%s'", dirpath); - fio_mkdir(FIO_BACKUP_HOST, dirpath, DIR_PERMISSION); + fio_mkdir(FIO_BACKUP_HOST, dirpath, DIR_PERMISSION, false); } } diff --git a/src/catalog.c b/src/catalog.c index b0cae0f4f..afe438ba3 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1424,7 +1424,6 @@ get_multi_timeline_parent(parray *backup_list, parray *tli_list, * It may be ok or maybe not, so it's up to the caller * to fix it or let it be. 
*/ - void pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path) { @@ -1467,7 +1466,7 @@ pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path) char path[MAXPGPATH]; join_path_components(path, backup->root_dir, parray_get(subdirs, i)); - fio_mkdir(FIO_BACKUP_HOST, path, DIR_PERMISSION); + fio_mkdir(FIO_BACKUP_HOST, path, DIR_PERMISSION, false); } free_dir_list(subdirs); @@ -1490,8 +1489,7 @@ create_backup_dir(pgBackup *backup, const char *backup_instance_path) join_path_components(path, backup_instance_path, base36enc(backup_id)); - /* TODO: add wrapper for remote mode */ - rc = dir_create_dir(path, DIR_PERMISSION, true); + rc = fio_mkdir(FIO_BACKUP_HOST, path, DIR_PERMISSION, true); if (rc == 0) { diff --git a/src/catchup.c b/src/catchup.c index 4a2cc5c46..ec707f084 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -707,7 +707,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Start stream replication */ join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR); - fio_mkdir(FIO_LOCAL_HOST, dest_xlog_path, DIR_PERMISSION); + fio_mkdir(FIO_LOCAL_HOST, dest_xlog_path, DIR_PERMISSION, false); start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, current.start_lsn, current.tli, false); @@ -823,7 +823,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, join_path_components(dirpath, dest_pgdata, file->rel_path); elog(VERBOSE, "Create directory '%s'", dirpath); - fio_mkdir(FIO_LOCAL_HOST, dirpath, DIR_PERMISSION); + fio_mkdir(FIO_LOCAL_HOST, dirpath, DIR_PERMISSION, false); } else { @@ -855,7 +855,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, linked_path, to_path); /* create tablespace directory */ - if (fio_mkdir(FIO_LOCAL_HOST, linked_path, file->mode) != 0) + if (fio_mkdir(FIO_LOCAL_HOST, linked_path, file->mode, false) != 0) elog(ERROR, "Could not create tablespace directory \"%s\": %s", linked_path, 
strerror(errno)); diff --git a/src/dir.c b/src/dir.c index e68ae6a17..a90cb24f3 100644 --- a/src/dir.c +++ b/src/dir.c @@ -135,36 +135,6 @@ static TablespaceList tablespace_dirs = {NULL, NULL}; /* Extra directories mapping */ static TablespaceList external_remap_list = {NULL, NULL}; -/* - * Create directory, also create parent directories if necessary. - * In strict mode treat already existing directory as error. - * Return values: - * 0 - ok - * -1 - error (check errno) - */ -int -dir_create_dir(const char *dir, mode_t mode, bool strict) -{ - char parent[MAXPGPATH]; - - strncpy(parent, dir, MAXPGPATH); - get_parent_directory(parent); - - /* Create parent first */ - if (access(parent, F_OK) == -1) - dir_create_dir(parent, mode, false); - - /* Create directory */ - if (mkdir(dir, mode) == -1) - { - if (errno == EEXIST && !strict) /* already exist */ - return 0; - return -1; - } - - return 0; -} - pgFile * pgFileNew(const char *path, const char *rel_path, bool follow_symlink, int external_dir_num, fio_location location) @@ -1106,7 +1076,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba linked_path, to_path); /* create tablespace directory */ - fio_mkdir(location, linked_path, pg_tablespace_mode); + fio_mkdir(location, linked_path, pg_tablespace_mode, false); /* create link to linked_path */ if (fio_symlink(location, linked_path, to_path, incremental) < 0) @@ -1124,7 +1094,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba join_path_components(to_path, data_dir, dir->rel_path); // TODO check exit code - fio_mkdir(location, to_path, dir->mode); + fio_mkdir(location, to_path, dir->mode, false); } if (extract_tablespaces) diff --git a/src/init.c b/src/init.c index 055c6d7ef..41ee2e3c9 100644 --- a/src/init.c +++ b/src/init.c @@ -33,13 +33,13 @@ do_init(CatalogState *catalogState) } /* create backup catalog root directory */ - dir_create_dir(catalogState->catalog_path, DIR_PERMISSION, false); + 
fio_mkdir(FIO_BACKUP_HOST, catalogState->catalog_path, DIR_PERMISSION, false); /* create backup catalog data directory */ - dir_create_dir(catalogState->backup_subdir_path, DIR_PERMISSION, false); + fio_mkdir(FIO_BACKUP_HOST, catalogState->backup_subdir_path, DIR_PERMISSION, false); /* create backup catalog wal directory */ - dir_create_dir(catalogState->wal_subdir_path, DIR_PERMISSION, false); + fio_mkdir(FIO_BACKUP_HOST, catalogState->wal_subdir_path, DIR_PERMISSION, false); elog(INFO, "Backup catalog '%s' successfully inited", catalogState->catalog_path); return 0; @@ -86,8 +86,8 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) instanceState->instance_name, instanceState->instance_wal_subdir_path); /* Create directory for data files of this specific instance */ - dir_create_dir(instanceState->instance_backup_subdir_path, DIR_PERMISSION, false); - dir_create_dir(instanceState->instance_wal_subdir_path, DIR_PERMISSION, false); + fio_mkdir(FIO_BACKUP_HOST, instanceState->instance_backup_subdir_path, DIR_PERMISSION, false); + fio_mkdir(FIO_BACKUP_HOST, instanceState->instance_wal_subdir_path, DIR_PERMISSION, false); /* * Write initial configuration file. 
diff --git a/src/merge.c b/src/merge.c index 16acf49ea..a23079b16 100644 --- a/src/merge.c +++ b/src/merge.c @@ -646,7 +646,7 @@ merge_chain(InstanceState *instanceState, makeExternalDirPathByNum(new_container, full_external_prefix, file->external_dir_num); join_path_components(dirpath, new_container, file->rel_path); - dir_create_dir(dirpath, DIR_PERMISSION, false); + fio_mkdir(FIO_BACKUP_HOST, dirpath, DIR_PERMISSION, false); } pg_atomic_init_flag(&file->lock); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 4b93098b2..b312bd77f 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1044,7 +1044,6 @@ extern void makeExternalDirPathByNum(char *ret_path, const char *pattern_path, const int dir_num); extern bool backup_contains_external(const char *dir, parray *dirs_list); -extern int dir_create_dir(const char *path, mode_t mode, bool strict); extern bool dir_is_empty(const char *path, fio_location location); extern bool fileExists(const char *path, fio_location location); diff --git a/src/restore.c b/src/restore.c index d1c50769c..0e92e4935 100644 --- a/src/restore.c +++ b/src/restore.c @@ -818,7 +818,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, for (i = 0; i < parray_num(external_dirs); i++) fio_mkdir(FIO_DB_HOST, parray_get(external_dirs, i), - DIR_PERMISSION); + DIR_PERMISSION, false); } /* @@ -844,7 +844,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, join_path_components(dirpath, external_path, file->rel_path); elog(VERBOSE, "Create external directory \"%s\"", dirpath); - fio_mkdir(FIO_DB_HOST, dirpath, file->mode); + fio_mkdir(FIO_DB_HOST, dirpath, file->mode, false); } } diff --git a/src/utils/file.c b/src/utils/file.c index af76e0f12..e04a92707 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1224,15 +1224,15 @@ fio_access(fio_location location, const char* path, int mode) { if (fio_is_remote(location)) { - fio_header hdr; - size_t path_len = strlen(path) + 1; - hdr.cop = FIO_ACCESS; - hdr.handle = 
-1; - hdr.size = path_len; - hdr.arg = mode; + fio_header hdr = { + .cop = FIO_ACCESS, + .handle = -1, + .size = strlen(path) + 1, + .arg = mode, + }; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, path, path_len), path_len); + IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); Assert(hdr.cop == FIO_ACCESS); @@ -1460,7 +1460,6 @@ fio_remove(fio_location location, const char* path, bool missing_ok) return result; } - static void fio_remove_impl(const char* path, bool missing_ok, int out) { @@ -1480,35 +1479,86 @@ fio_remove_impl(const char* path, bool missing_ok, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } -/* Create directory - * TODO: add strict flag +/* + * Create directory, also create parent directories if necessary. + * In strict mode treat already existing directory as error. + * Return values: + * 0 - ok + * -1 - error (check errno) + */ +static int +dir_create_dir(const char *dir, mode_t mode, bool strict) +{ + char parent[MAXPGPATH]; + + strncpy(parent, dir, MAXPGPATH); + get_parent_directory(parent); + + /* Create parent first */ + if (access(parent, F_OK) == -1) + dir_create_dir(parent, mode, false); + + /* Create directory */ + if (mkdir(dir, mode) == -1) + { + if (errno == EEXIST && !strict) /* already exist */ + return 0; + return -1; + } + + return 0; +} + +/* + * Create directory */ int -fio_mkdir(fio_location location, const char* path, int mode) +fio_mkdir(fio_location location, const char* path, int mode, bool strict) { if (fio_is_remote(location)) { - fio_header hdr; - size_t path_len = strlen(path) + 1; - hdr.cop = FIO_MKDIR; - hdr.handle = -1; - hdr.size = path_len; - hdr.arg = mode; + fio_header hdr = { + .cop = FIO_MKDIR, + .handle = strict ? 
1 : 0, /* ugly "hack" to pass more params*/ + .size = strlen(path) + 1, + .arg = mode, + }; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, path, path_len), path_len); + IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); Assert(hdr.cop == FIO_MKDIR); - return hdr.arg; + if (hdr.arg != 0) + { + errno = hdr.arg; + return -1; + } + return 0; } else { - return dir_create_dir(path, mode, false); + return dir_create_dir(path, mode, strict); } } +static void +fio_mkdir_impl(const char* path, int mode, bool strict, int out) +{ + fio_header hdr = { + .cop = FIO_MKDIR, + .handle = -1, + .size = 0, + .arg = 0, + }; + + if (dir_create_dir(path, mode, strict) != 0) + hdr.arg = errno; + + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); +} + /* Change file mode */ int fio_chmod(fio_location location, const char* path, int mode) @@ -3384,9 +3434,7 @@ fio_communicate(int in, int out) fio_remove_impl(buf, hdr.arg == 1, out); break; case FIO_MKDIR: /* Create directory */ - hdr.size = 0; - hdr.arg = dir_create_dir(buf, hdr.arg, false); - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + fio_mkdir_impl(buf, hdr.arg, hdr.handle == 1, out); break; case FIO_CHMOD: /* Change file mode */ SYS_CHECK(chmod(buf, hdr.arg)); diff --git a/src/utils/file.h b/src/utils/file.h index b5f168b54..5639a3e4c 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -158,7 +158,7 @@ extern pg_crc32 fio_get_crc32(fio_location location, const char *file_path, bool extern int fio_rename(fio_location location, const char* old_path, const char* new_path); extern int fio_symlink(fio_location location, const char* target, const char* link_path, bool overwrite); extern int fio_remove(fio_location location, const char* path, bool missing_ok); -extern int fio_mkdir(fio_location location, const char* path, int mode); +extern int fio_mkdir(fio_location 
location, const char* path, int mode, bool strict); extern int fio_chmod(fio_location location, const char* path, int mode); extern int fio_access(fio_location location, const char* path, int mode); extern int fio_stat(fio_location location, const char* path, struct stat* st, bool follow_symlinks); From 7e2785b9197fb00c14f7025e3cb72c209f964ed3 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 7 Jan 2022 21:15:15 +0300 Subject: [PATCH 013/339] [refactoring] remove pgFileSize() --- src/dir.c | 11 ----------- src/validate.c | 6 +++++- 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/src/dir.c b/src/dir.c index a90cb24f3..23b4f9121 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1602,17 +1602,6 @@ fileExists(const char *path, fio_location location) return true; } -size_t -pgFileSize(const char *path) -{ - struct stat buf; - - if (stat(path, &buf) == -1) - elog(ERROR, "Cannot stat file \"%s\": %s", path, strerror(errno)); - - return buf.st_size; -} - /* * Construct parray containing remapped external directories paths * from string like /path1:/path2 diff --git a/src/validate.c b/src/validate.c index 4044ac158..d88de5583 100644 --- a/src/validate.c +++ b/src/validate.c @@ -200,10 +200,14 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) (parse_program_version(backup->program_version) == 20201))) { char path[MAXPGPATH]; + struct stat st; join_path_components(path, backup->root_dir, DATABASE_FILE_LIST); - if (pgFileSize(path) >= (BLCKSZ*500)) + if (fio_stat(FIO_BACKUP_HOST, path, &st, true) < 0) + elog(ERROR, "Cannot stat file \"%s\": %s", path, strerror(errno)); + + if (st.st_size >= (BLCKSZ*500)) { elog(WARNING, "Backup %s is a victim of metadata corruption. " "Additional information can be found here: " From 756060eb65803e546c6864c3ce133f090b4432db Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 7 Jan 2022 23:09:56 +0300 Subject: [PATCH 014/339] fio_symlink() refactor --- src/catchup.c | 2 +- src/utils/file.c | 38 ++++++++++++++++++++++++++------------ 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/src/catchup.c b/src/catchup.c index ec707f084..3b6a0fe6a 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -862,7 +862,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* create link to linked_path */ if (fio_symlink(FIO_LOCAL_HOST, linked_path, to_path, true) < 0) elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s", - linked_path, to_path, strerror(errno)); + to_path, linked_path, strerror(errno)); } } diff --git a/src/utils/file.c b/src/utils/file.c index e04a92707..36ecf208d 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1256,18 +1256,27 @@ fio_symlink(fio_location location, const char* target, const char* link_path, bo { if (fio_is_remote(location)) { - fio_header hdr; size_t target_len = strlen(target) + 1; size_t link_path_len = strlen(link_path) + 1; - hdr.cop = FIO_SYMLINK; - hdr.handle = -1; - hdr.size = target_len + link_path_len; - hdr.arg = overwrite ? 1 : 0; + fio_header hdr = { + .cop = FIO_SYMLINK, + .handle = -1, + .size = target_len + link_path_len, + .arg = overwrite ? 
1 : 0, + }; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, target, target_len), target_len); IO_CHECK(fio_write_all(fio_stdout, link_path, link_path_len), link_path_len); + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + Assert(hdr.cop == FIO_SYMLINK); + + if (hdr.arg != 0) + { + errno = hdr.arg; + return -1; + } return 0; } else @@ -1280,17 +1289,22 @@ fio_symlink(fio_location location, const char* target, const char* link_path, bo } static void -fio_symlink_impl(int out, char *buf, bool overwrite) +fio_symlink_impl(const char* target, const char* link_path, bool overwrite, int out) { - char *linked_path = buf; - char *link_path = buf + strlen(buf) + 1; + fio_header hdr = { + .cop = FIO_SYMLINK, + .handle = -1, + .size = 0, + .arg = 0, + }; if (overwrite) remove_file_or_dir(link_path); - if (symlink(linked_path, link_path)) - elog(ERROR, "Could not create symbolic link \"%s\": %s", - link_path, strerror(errno)); + if (symlink(target, link_path) != 0) + hdr.arg = errno; + + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } /* Rename file */ @@ -3428,7 +3442,7 @@ fio_communicate(int in, int out) fio_rename_impl(buf, buf + strlen(buf) + 1, out); break; case FIO_SYMLINK: /* Create symbolic link */ - fio_symlink_impl(out, buf, hdr.arg > 0 ? true : false); + fio_symlink_impl(buf, buf + strlen(buf) + 1, hdr.arg == 1, out); break; case FIO_REMOVE: /* Remove file or directory (TODO: Win32) */ fio_remove_impl(buf, hdr.arg == 1, out); From e78b882739e803045e9fb6e7e71dd43d74f575f6 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Sat, 8 Jan 2022 08:27:00 +0300 Subject: [PATCH 015/339] [refactoring] fix typos --- src/utils/file.c | 4 ++-- tests/merge.py | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 36ecf208d..7851648a6 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -615,7 +615,7 @@ fio_close(int fd) /* Wait for response */ IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - Assert(hdr.arg == FIO_CLOSE); + Assert(hdr.cop == FIO_CLOSE); if (hdr.arg != 0) { @@ -831,7 +831,7 @@ fio_write(int fd, void const* buf, size_t size) /* check results */ IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - Assert(hdr.arg == FIO_WRITE); + Assert(hdr.cop == FIO_WRITE); /* set errno */ if (hdr.arg > 0) diff --git a/tests/merge.py b/tests/merge.py index fe0927f49..cf9618111 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -1191,7 +1191,7 @@ def test_continue_failed_merge_2(self): gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) - gdb.set_breakpoint('pgFileDelete') + gdb.set_breakpoint('fio_remove') gdb.run_until_break() @@ -1683,7 +1683,7 @@ def test_failed_merge_after_delete(self): gdb.set_breakpoint('delete_backup_files') gdb.run_until_break() - gdb.set_breakpoint('pgFileDelete') + gdb.set_breakpoint('fio_remove') gdb.continue_execution_until_break(20) gdb._execute('signal SIGKILL') @@ -1767,7 +1767,7 @@ def test_failed_merge_after_delete_1(self): # gdb.set_breakpoint('parray_bsearch') # gdb.continue_execution_until_break() - gdb.set_breakpoint('pgFileDelete') + gdb.set_breakpoint('fio_remove') gdb.continue_execution_until_break(30) gdb._execute('signal SIGKILL') @@ -1829,7 +1829,7 @@ def test_failed_merge_after_delete_2(self): backup_dir, 'node', page_2, gdb=True, options=['--log-level-console=VERBOSE']) - gdb.set_breakpoint('pgFileDelete') + gdb.set_breakpoint('fio_remove') gdb.run_until_break() gdb.continue_execution_until_break(2) gdb._execute('signal 
SIGKILL') @@ -1907,7 +1907,7 @@ def test_failed_merge_after_delete_3(self): gdb.set_breakpoint('delete_backup_files') gdb.run_until_break() - gdb.set_breakpoint('pgFileDelete') + gdb.set_breakpoint('fio_remove') gdb.continue_execution_until_break(20) gdb._execute('signal SIGKILL') From 48639c7af93c2c0a62cc39283696841e7db2b21c Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Sat, 8 Jan 2022 08:30:25 +0300 Subject: [PATCH 016/339] ugly fix: exclusive locks are incorrectly released multiple times, previosly errors was not checked --- src/catalog.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/catalog.c b/src/catalog.c index afe438ba3..c4c392d5c 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -732,7 +732,8 @@ release_excl_lock_file(const char *backup_dir) /* TODO Sanity check: maybe we should check, that pid in lock file is my_pid */ /* remove pid file */ - if (fio_remove(FIO_BACKUP_HOST, lock_file, false) != 0) + /* exclusive locks releasing multiple times -> missing_ok = true */ + if (fio_remove(FIO_BACKUP_HOST, lock_file, true) != 0) elog(ERROR, "Cannot remove exclusive lock file \"%s\": %s", lock_file, strerror(errno)); } From 0df0f9affc9916fe77415a1561494213c3c453de Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Sat, 8 Jan 2022 08:44:54 +0300 Subject: [PATCH 017/339] Ugly fix (set missing_ok = true) Previously, the return code was not checked when deleting files. Now, in order for the tests to work out, you need to ignore the missing files. 
This fix for tests: tests.retention.RetentionTest.test_window_merge_interleaved_incremental_chains_1 tests.retention.RetentionTest.test_window_chains tests.merge.MergeTest.test_smart_merge tests.merge.MergeTest.test_merge_tablespaces tests.merge.MergeTest.test_merge_pg_filenode_map tests.merge.MergeTest.test_failed_merge_after_delete_3 tests.merge.MergeTest.test_failed_merge_after_delete tests.merge.MergeTest.test_crash_after_opening_backup_control_2 --- src/dir.c | 2 +- src/merge.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/dir.c b/src/dir.c index 23b4f9121..ab678dac7 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1817,7 +1817,7 @@ cleanup_tablespace(const char *path) join_path_components(fullpath, path, file->rel_path); - if (fio_remove(FIO_DB_HOST, fullpath, false) == 0) + if (fio_remove(FIO_DB_HOST, fullpath, true) == 0) elog(VERBOSE, "Deleted file \"%s\"", fullpath); else elog(ERROR, "Cannot delete file or directory \"%s\": %s", fullpath, strerror(errno)); diff --git a/src/merge.c b/src/merge.c index a23079b16..fb04a7a6b 100644 --- a/src/merge.c +++ b/src/merge.c @@ -809,7 +809,7 @@ merge_chain(InstanceState *instanceState, /* We need full path, file object has relative path */ join_path_components(full_file_path, full_database_dir, full_file->rel_path); - if (fio_remove(FIO_BACKUP_HOST, full_file_path, false) == 0) + if (fio_remove(FIO_BACKUP_HOST, full_file_path, true) == 0) elog(VERBOSE, "Deleted \"%s\"", full_file_path); else elog(ERROR, "Cannot delete file or directory \"%s\": %s", full_file_path, strerror(errno)); @@ -1145,7 +1145,7 @@ remove_dir_with_files(const char *path) join_path_components(full_path, path, file->rel_path); - if (fio_remove(FIO_LOCAL_HOST, full_path, false) == 0) + if (fio_remove(FIO_LOCAL_HOST, full_path, true) == 0) elog(VERBOSE, "Deleted \"%s\"", full_path); else elog(ERROR, "Cannot delete file or directory \"%s\": %s", full_path, strerror(errno)); From 0940968e25175567d56219448b230e981b8c291d Mon Sep 
17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Sat, 8 Jan 2022 08:58:16 +0300 Subject: [PATCH 018/339] fix test_failed_merge_after_delete_2 --- tests/merge.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/merge.py b/tests/merge.py index cf9618111..5b04e4704 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -1829,6 +1829,8 @@ def test_failed_merge_after_delete_2(self): backup_dir, 'node', page_2, gdb=True, options=['--log-level-console=VERBOSE']) + gdb.set_breakpoint('delete_backup_files') + gdb.run_until_break() gdb.set_breakpoint('fio_remove') gdb.run_until_break() gdb.continue_execution_until_break(2) From cadded9bf07a7b29f28222a1268c21f539e6f842 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Mon, 10 Jan 2022 20:07:43 +0300 Subject: [PATCH 019/339] Support for building pg_probackup outside the source tree --- .gitignore | 14 +--- Makefile | 162 +++++++++++++++++++++++++--------------- get_pg_version.mk | 36 +++++++++ packaging/Makefile.pkg | 16 ++-- packaging/Makefile.repo | 4 +- packaging/Makefile.test | 22 +++--- 6 files changed, 162 insertions(+), 92 deletions(-) create mode 100644 get_pg_version.mk diff --git a/.gitignore b/.gitignore index c0b4de331..622c71b37 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ # Object files *.o +*.bc # Libraries *.lib @@ -33,15 +34,8 @@ /tests/helpers/*pyc # Extra files -/src/pg_crc.c -/src/receivelog.c -/src/receivelog.h -/src/streamutil.c -/src/streamutil.h -/src/xlogreader.c -/src/walmethods.c -/src/walmethods.h -/src/instr_time.h +/src/borrowed/ +/borrowed.mk # Doc files /doc/*html @@ -55,7 +49,7 @@ /backup_restore.sh # Packaging -/build +/pkg-build /packaging/pkg/tarballs/pgpro.tar.bz2 /packaging/repo/pg_probackup /packaging/repo/pg_probackup-forks diff --git a/Makefile b/Makefile index f93cc37a4..f32b2f94d 100644 --- a/Makefile +++ b/Makefile @@ -1,43 +1,77 @@ -PROGRAM = pg_probackup -WORKDIR ?= $(CURDIR) -BUILDDIR = $(WORKDIR)/build/ -PBK_GIT_REPO = 
https://github.com/postgrespro/pg_probackup +# pg_probackup build system +# +# You can build pg_probackup in different ways: +# +# 1. in source tree using PGXS (with already installed PG and existing PG sources) +# git clone https://github.com/postgrespro/pg_probackup pg_probackup +# cd pg_probackup +# make USE_PGXS=1 PG_CONFIG= top_srcdir= +# +# 2. out of source using PGXS +# git clone https://github.com/postgrespro/pg_probackup pg_probackup-src +# mkdir pg_probackup-build && cd pg_probackup-build +# make USE_PGXS=1 PG_CONFIG= top_srcdir= -f ../pg_probackup-src/Makefile +# +# 3. in PG source (without PGXS -- using only PG sources) +# git clone https://git.postgresql.org/git/postgresql.git postgresql +# git clone https://github.com/postgrespro/pg_probackup postgresql/contrib/pg_probackup +# cd postgresql +# ./configure ... && make +# make --no-print-directory -C contrib/pg_probackup +# +# 4. out of PG source and without PGXS +# git clone https://git.postgresql.org/git/postgresql.git postgresql-src +# git clone https://github.com/postgrespro/pg_probackup postgresql-src/contrib/pg_probackup +# mkdir postgresql-build && cd postgresql-build +# ../postgresql-src/configure ... 
&& make +# make --no-print-directory -C contrib/pg_probackup +# +top_pbk_srcdir := $(dir $(realpath $(firstword $(MAKEFILE_LIST)))) -# utils -OBJS = src/utils/configuration.o src/utils/json.o src/utils/logger.o \ - src/utils/parray.o src/utils/pgut.o src/utils/thread.o src/utils/remote.o src/utils/file.o +# get postgres version +PG_MAJORVER != $(MAKE) USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) --silent --makefile=$(top_pbk_srcdir)get_pg_version.mk +#$(info Making with PG_MAJORVER=$(PG_MAJORVER)) +PROGRAM := pg_probackup + +# pg_probackup sources +OBJS := src/utils/configuration.o src/utils/json.o src/utils/logger.o \ + src/utils/parray.o src/utils/pgut.o src/utils/thread.o src/utils/remote.o src/utils/file.o OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o src/data.o \ src/delete.o src/dir.o src/fetch.o src/help.o src/init.o src/merge.o \ src/parsexlog.o src/ptrack.o src/pg_probackup.o src/restore.o src/show.o src/stream.o \ src/util.o src/validate.o src/datapagemap.o src/catchup.o -# borrowed files -OBJS += src/pg_crc.o src/receivelog.o src/streamutil.o \ - src/xlogreader.o - -EXTRA_CLEAN = src/pg_crc.c \ - src/receivelog.c src/receivelog.h src/streamutil.c src/streamutil.h \ - src/xlogreader.c src/instr_time.h - -ifdef top_srcdir -srchome := $(abspath $(top_srcdir)) -else -top_srcdir=../.. -ifneq (,$(wildcard ../../../contrib/pg_probackup)) -# separate build directory support -srchome := $(abspath $(top_srcdir)/..) 
-else -srchome := $(abspath $(top_srcdir)) -endif +# sources borrowed from postgresql (paths are relative to pg top dir) +BORROWED_H_SRC := \ + src/include/portability/instr_time.h \ + src/bin/pg_basebackup/receivelog.h \ + src/bin/pg_basebackup/streamutil.h +BORROWED_C_SRC := \ + src/backend/access/transam/xlogreader.c \ + src/backend/utils/hash/pg_crc.c \ + src/bin/pg_basebackup/receivelog.c \ + src/bin/pg_basebackup/streamutil.c +ifneq ($(PG_MAJORVER), $(findstring $(PG_MAJORVER), 9.5 9.6)) +BORROWED_H_SRC += \ + src/bin/pg_basebackup/walmethods.h +BORROWED_C_SRC += \ + src/bin/pg_basebackup/walmethods.c endif -# OBJS variable must be finally defined before invoking the include directive -ifneq (,$(wildcard $(srchome)/src/bin/pg_basebackup/walmethods.c)) -OBJS += src/walmethods.o -EXTRA_CLEAN += src/walmethods.c src/walmethods.h +BORROW_DIR := src/borrowed +BORROWED_H := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_H_SRC))) +BORROWED_C := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_C_SRC))) +OBJS += $(patsubst %.c, %.o, $(BORROWED_C)) +EXTRA_CLEAN := $(BORROWED_H) $(BORROWED_C) $(BORROW_DIR) borrowed.mk + +# off-source build support +ifneq ($(abspath $(CURDIR))/, $(top_pbk_srcdir)) +VPATH := $(top_pbk_srcdir) endif +# standard PGXS stuff +# all OBJS must be defined above this ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) @@ -49,41 +83,47 @@ include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif -PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -Isrc -I$(srchome)/$(subdir)/src +# now we can use standard MAJORVERSION variable instead of calculated PG_MAJORVER +undefine PG_MAJORVER + +# +PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -I$(top_pbk_srcdir)/src -I$(BORROW_DIR) +ifdef VPATH +PG_CPPFLAGS += -Isrc +endif override CPPFLAGS := -DFRONTEND $(CPPFLAGS) $(PG_CPPFLAGS) PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} -src/utils/configuration.o: src/datapagemap.h -src/archive.o: 
src/instr_time.h -src/backup.o: src/receivelog.h src/streamutil.h - -src/instr_time.h: $(srchome)/src/include/portability/instr_time.h - rm -f $@ && $(LN_S) $(srchome)/src/include/portability/instr_time.h $@ -src/pg_crc.c: $(srchome)/src/backend/utils/hash/pg_crc.c - rm -f $@ && $(LN_S) $(srchome)/src/backend/utils/hash/pg_crc.c $@ -src/receivelog.c: $(srchome)/src/bin/pg_basebackup/receivelog.c - rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/receivelog.c $@ -ifneq (,$(wildcard $(srchome)/src/bin/pg_basebackup/walmethods.c)) -src/receivelog.h: src/walmethods.h $(srchome)/src/bin/pg_basebackup/receivelog.h -else -src/receivelog.h: $(srchome)/src/bin/pg_basebackup/receivelog.h +# additional dependencies on borrowed files +src/archive.o: $(BORROW_DIR)/instr_time.h +src/backup.o src/catchup.o src/pg_probackup.o: $(BORROW_DIR)/streamutil.h +src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o: $(BORROW_DIR)/receivelog.h +ifneq ($(MAJORVERSION), $(findstring $(MAJORVERSION), 9.5 9.6)) +$(BORROW_DIR)/receivelog.h: $(BORROW_DIR)/walmethods.h +$(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h endif - rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/receivelog.h $@ -src/streamutil.c: $(srchome)/src/bin/pg_basebackup/streamutil.c - rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/streamutil.c $@ -src/streamutil.h: $(srchome)/src/bin/pg_basebackup/streamutil.h - rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/streamutil.h $@ -src/xlogreader.c: $(srchome)/src/backend/access/transam/xlogreader.c - rm -f $@ && $(LN_S) $(srchome)/src/backend/access/transam/xlogreader.c $@ -src/walmethods.c: $(srchome)/src/bin/pg_basebackup/walmethods.c - rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/walmethods.c $@ -src/walmethods.h: $(srchome)/src/bin/pg_basebackup/walmethods.h - rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/walmethods.h $@ -ifeq ($(PORTNAME), aix) - CC=xlc_r -endif +# generate separate makefile to handle borrowed files 
+borrowed.mk: $(firstword $(MAKEFILE_LIST)) + $(file >$@,# This file is autogenerated. Do not edit!) + $(foreach borrowed_file, $(BORROWED_H_SRC) $(BORROWED_C_SRC), \ + $(file >>$@,$(addprefix $(BORROW_DIR)/, $(notdir $(borrowed_file))): | $(CURDIR)/$(BORROW_DIR)/ $(realpath $(top_srcdir)/$(borrowed_file))) \ + $(file >>$@,$(shell echo "\t"'$$(LN_S) $(realpath $(top_srcdir)/$(borrowed_file)) $$@')) \ + ) +include borrowed.mk + +# create needed directories for borrowed files and off-source build +OBJDIRS = $(addprefix $(CURDIR)/, $(sort $(dir $(OBJS)))) +$(OBJS): | $(OBJDIRS) +$(OBJDIRS): + mkdir -p $@ + +# packaging infrastructure +WORKDIR ?= $(CURDIR) +PBK_PKG_BUILDDIR = $(WORKDIR)/pkg-build/ +PBK_GIT_REPO = https://github.com/postgrespro/pg_probackup + +include $(top_pbk_srcdir)/packaging/Makefile.pkg +include $(top_pbk_srcdir)/packaging/Makefile.repo +include $(top_pbk_srcdir)/packaging/Makefile.test -include packaging/Makefile.pkg -include packaging/Makefile.repo -include packaging/Makefile.test diff --git a/get_pg_version.mk b/get_pg_version.mk new file mode 100644 index 000000000..d5468c5bb --- /dev/null +++ b/get_pg_version.mk @@ -0,0 +1,36 @@ +# pg_probackup build system +# +# When building pg_probackup, there is a chicken and egg problem: +# 1. We have to define the OBJS list before including the PG makefiles. +# 2. To define this list, we need to know the PG major version. +# 3. But we can find out the postgres version only after including makefiles. +# +# This minimal makefile solves this problem, its only purpose is to +# calculate the version number from which the main build will occur next. +# +# Usage: +# include this line into main makefile +# PG_MAJORVER != $(MAKE) USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) --silent --makefile=get_pg_version.mk +# +# Known issues: +# When parent make called with -C and without --no-print-directory, then +# 'make: Leaving directory ...' 
string will be added (by caller make process) to PG_MAJORVER +# (at least with GNU Make 4.2.1) +# +.PHONY: get_pg_version +get_pg_version: + +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = contrib/pg_probackup +top_builddir = ../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif + +get_pg_version: + $(info $(MAJORVERSION)) + diff --git a/packaging/Makefile.pkg b/packaging/Makefile.pkg index e17243614..7ebfe315c 100644 --- a/packaging/Makefile.pkg +++ b/packaging/Makefile.pkg @@ -36,7 +36,7 @@ build/prepare: mkdir -p build build/clean: build/prepare - find $(BUILDDIR) -maxdepth 1 -type f -exec rm -f {} \; + find $(PBK_PKG_BUILDDIR) -maxdepth 1 -type f -exec rm -f {} \; build/all: build/debian build/ubuntu build/centos build/oraclelinux build/alt build/suse build/rhel @echo Packaging is done @@ -76,8 +76,8 @@ define build_deb --rm pgpro/$1:$2 /app/in/scripts/deb.sh endef -include packaging/pkg/Makefile.debian -include packaging/pkg/Makefile.ubuntu +include $(top_pbk_srcdir)/packaging/pkg/Makefile.debian +include $(top_pbk_srcdir)/packaging/pkg/Makefile.ubuntu # CENTOS build/centos: build/centos_7 build/centos_8 #build/rpm_repo_package_centos @@ -127,9 +127,9 @@ define build_rpm --rm pgpro/$1:$2 /app/in/scripts/rpm.sh endef -include packaging/pkg/Makefile.centos -include packaging/pkg/Makefile.rhel -include packaging/pkg/Makefile.oraclelinux +include $(top_pbk_srcdir)/packaging/pkg/Makefile.centos +include $(top_pbk_srcdir)/packaging/pkg/Makefile.rhel +include $(top_pbk_srcdir)/packaging/pkg/Makefile.oraclelinux # Alt Linux @@ -157,7 +157,7 @@ define build_alt --rm pgpro/$1:$2 /app/in/scripts/alt.sh endef -include packaging/pkg/Makefile.alt +include $(top_pbk_srcdir)/packaging/pkg/Makefile.alt # SUSE Linux build/suse: build/suse_15.1 build/suse_15.2 @@ -182,4 +182,4 @@ define build_suse --rm pgpro/$1:$2 /app/in/scripts/suse.sh endef -include 
packaging/pkg/Makefile.suse +include $(top_pbk_srcdir)/packaging/pkg/Makefile.suse diff --git a/packaging/Makefile.repo b/packaging/Makefile.repo index 10fb27137..fdfb1d427 100644 --- a/packaging/Makefile.repo +++ b/packaging/Makefile.repo @@ -100,9 +100,9 @@ build/repo_suse_15.2: repo_finish: # cd build/data/www/$(PBK_PKG_REPO)/ - cd $(BUILDDIR)/data/www/$(PBK_PKG_REPO)/rpm && sudo ln -nsf $(PBK_VERSION) latest + cd $(PBK_PKG_BUILDDIR)/data/www/$(PBK_PKG_REPO)/rpm && sudo ln -nsf $(PBK_VERSION) latest # following line only for vanilla - cd $(BUILDDIR)/data/www/$(PBK_PKG_REPO)/srpm && sudo ln -nsf $(PBK_VERSION) latest + cd $(PBK_PKG_BUILDDIR)/data/www/$(PBK_PKG_REPO)/srpm && sudo ln -nsf $(PBK_VERSION) latest # sudo ln -rfs build/data/www/$(PBK_PKG_REPO)/rpm/${PBK_VERSION} build/data/www/$(PBK_PKG_REPO)/rpm/latest # sudo ln -rfs build/data/www/$(PBK_PKG_REPO)/srpm/${PBK_VERSION} build/data/www/$(PBK_PKG_REPO)/srpm/latest diff --git a/packaging/Makefile.test b/packaging/Makefile.test index f5e004f01..aa9cb801f 100644 --- a/packaging/Makefile.test +++ b/packaging/Makefile.test @@ -39,7 +39,7 @@ define test_deb docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ docker run \ -v $(WORKDIR)/packaging/test:/app/in \ - -v $(BUILDDIR)/data/www:/app/www \ + -v $(PBK_PKG_BUILDDIR)/data/www:/app/www \ -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" -e "PG_VERSION=$4" -e "PG_FULL_VERSION=$5" \ -e "PKG_HASH=$(PBK_HASH)" -e "PKG_URL=$(PBK_GIT_REPO)" -e "PKG_RELEASE=$(PBK_RELEASE)" -e "PKG_NAME=pg-probackup-$(PKG_NAME_SUFFIX)$4" \ -e "PKG_VERSION=$(PBK_VERSION)" -e "PBK_EDITION=$(PBK_EDITION)" -e "PBK_EDITION_FULL=$(PBK_EDITION_FULL)" \ @@ -47,8 +47,8 @@ define test_deb --rm pgpro/$1:$2 /app/in/scripts/deb$(SCRIPT_SUFFIX).sh endef -include packaging/test/Makefile.debian -include packaging/test/Makefile.ubuntu +include $(top_pbk_srcdir)/packaging/test/Makefile.debian +include $(top_pbk_srcdir)/packaging/test/Makefile.ubuntu # CENTOS 
build/test_centos: build/test_centos_7 build/test_centos_8 @@ -86,7 +86,7 @@ define test_rpm docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ docker run \ -v $(WORKDIR)/packaging/test:/app/in \ - -v $(BUILDDIR)/data/www:/app/www \ + -v $(PBK_PKG_BUILDDIR)/data/www:/app/www \ -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" -e "PG_VERSION=$4" -e "PG_FULL_VERSION=$5" \ -e "PKG_HASH=$(PBK_HASH)" -e "PKG_URL=$(PBK_GIT_REPO)" -e "PKG_RELEASE=$(PBK_RELEASE)" -e "PKG_NAME=pg_probackup-$(PKG_NAME_SUFFIX)$4" \ -e "PKG_VERSION=$(PBK_VERSION)" -e "PBK_EDITION=$(PBK_EDITION)" -e "PBK_EDITION_FULL=$(PBK_EDITION_FULL)" \ @@ -94,9 +94,9 @@ define test_rpm --rm pgpro/$1:$2 /app/in/scripts/rpm$(SCRIPT_SUFFIX).sh endef -include packaging/test/Makefile.centos -include packaging/test/Makefile.rhel -include packaging/test/Makefile.oraclelinux +include $(top_pbk_srcdir)/packaging/test/Makefile.centos +include $(top_pbk_srcdir)/packaging/test/Makefile.rhel +include $(top_pbk_srcdir)/packaging/test/Makefile.oraclelinux # Alt Linux build/test_alt: build/test_alt_8 build/test_alt_9 @@ -115,7 +115,7 @@ define test_alt docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ docker run \ -v $(WORKDIR)/packaging/test:/app/in \ - -v $(BUILDDIR)/data/www:/app/www \ + -v $(PBK_PKG_BUILDDIR)/data/www:/app/www \ -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" -e "PG_VERSION=$4" -e "PG_FULL_VERSION=$5" \ -e "PKG_HASH=$(PBK_HASH)" -e "PKG_URL=$(PBK_GIT_REPO)" -e "PKG_RELEASE=$(PBK_RELEASE)" -e "PKG_NAME=pg_probackup-$(PKG_NAME_SUFFIX)$4" \ -e "PKG_VERSION=$(PBK_VERSION)" -e "PBK_EDITION=$(PBK_EDITION)" -e "PBK_EDITION_FULL=$(PBK_EDITION_FULL)" \ @@ -123,7 +123,7 @@ define test_alt --rm pgpro/$1:$2 /app/in/scripts/alt$(SCRIPT_SUFFIX).sh endef -include packaging/test/Makefile.alt +include $(top_pbk_srcdir)/packaging/test/Makefile.alt # SUSE Linux build/test_suse: build/test_suse_15.1 build/test_suse_15.2 @@ -139,7 +139,7 @@ 
define test_suse docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ docker run \ -v $(WORKDIR)/packaging/test:/app/in \ - -v $(BUILDDIR)/data/www:/app/www \ + -v $(PBK_PKG_BUILDDIR)/data/www:/app/www \ -e "DISTRIB=$1" -e "DISTRIB_VERSION=$2" -e "CODENAME=$3" -e "PG_VERSION=$4" -e "PG_FULL_VERSION=$5" \ -e "PKG_HASH=$(PBK_HASH)" -e "PKG_URL=$(PBK_GIT_REPO)" -e "PKG_RELEASE=$(PBK_RELEASE)" -e "PKG_NAME=pg_probackup-$(PKG_NAME_SUFFIX)$4" \ -e "PKG_VERSION=$(PBK_VERSION)" -e "PBK_EDITION=$(PBK_EDITION)" -e "PBK_EDITION_FULL=$(PBK_EDITION_FULL)" \ @@ -147,4 +147,4 @@ define test_suse --rm pgpro/$1:$2 /app/in/scripts/suse$(SCRIPT_SUFFIX).sh endef -include packaging/test/Makefile.suse +include $(top_pbk_srcdir)/packaging/test/Makefile.suse From dea9368c7e5f202dc47a8daa37cbf6dbc03b1b8d Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya Date: Thu, 24 Mar 2022 10:28:21 +0300 Subject: [PATCH 020/339] [DOC] Remove mentioning PostgreSQL 9.5 [skip travis] --- doc/pgprobackup.xml | 37 +------------------------------------ 1 file changed, 1 insertion(+), 36 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 76ec2cd76..887ed08cf 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -164,7 +164,7 @@ doc/src/sgml/pgprobackup.sgml recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. - pg_probackup supports PostgreSQL 9.5 or higher. + pg_probackup supports PostgreSQL 9.6 or higher. @@ -401,11 +401,6 @@ doc/src/sgml/pgprobackup.sgml pg_probackup currently has the following limitations: - - - pg_probackup only supports PostgreSQL 9.5 and higher. - - The remote mode is not supported on Windows systems. @@ -422,17 +417,6 @@ doc/src/sgml/pgprobackup.sgml option to postgres. 
- - - For PostgreSQL 9.5, functions - pg_create_restore_point(text) and - pg_switch_xlog() can be executed only if - the backup role is a superuser, so backup of a - cluster with low amount of WAL traffic by a non-superuser - role can take longer than the backup of the same cluster by - a superuser role. - - The PostgreSQL server from which the backup was taken and @@ -612,25 +596,6 @@ pg_probackup add-instance -B backup_dir -D used for connection to the PostgreSQL server: - - For PostgreSQL 9.5: - - -BEGIN; -CREATE ROLE backup WITH LOGIN; -GRANT USAGE ON SCHEMA pg_catalog TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; -COMMIT; - For PostgreSQL 9.6: From 4ee859228a88c1cab13da1970050c999c434eb1a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 10 Jun 2022 21:20:36 +0300 Subject: [PATCH 021/339] rapid agent close + disable ssh control master. 
--- src/utils/file.c | 5 ++++- src/utils/remote.c | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/utils/file.c b/src/utils/file.c index 7851648a6..484898d19 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -492,8 +492,10 @@ fio_disconnect(void) Assert(hdr.cop == FIO_DISCONNECTED); SYS_CHECK(close(fio_stdin)); SYS_CHECK(close(fio_stdout)); + SYS_CHECK(close(fio_stderr)); fio_stdin = 0; fio_stdout = 0; + fio_stderr = 0; wait_ssh(); } } @@ -3505,7 +3507,8 @@ fio_communicate(int in, int out) case FIO_DISCONNECT: hdr.cop = FIO_DISCONNECTED; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - break; + free(buf); + return; case FIO_GET_ASYNC_ERROR: fio_get_async_error_impl(out); break; diff --git a/src/utils/remote.c b/src/utils/remote.c index ec0110db4..3286052a5 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -147,6 +147,9 @@ bool launch_agent(void) ssh_argv[ssh_argc++] = "-o"; ssh_argv[ssh_argc++] = "Compression=no"; + ssh_argv[ssh_argc++] = "-o"; + ssh_argv[ssh_argc++] = "ControlMaster=no"; + ssh_argv[ssh_argc++] = "-o"; ssh_argv[ssh_argc++] = "LogLevel=error"; From 549d98ab93a80e228a2f4551194383395f88f32e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 5 Jul 2022 10:45:11 +0300 Subject: [PATCH 022/339] add fu_utils --- Makefile | 4 +- gen_probackup_project.pl | 1 + src/fu_util/CMakeLists.txt | 44 + src/fu_util/LICENSE | 22 + src/fu_util/README.obj.ru.md | 923 ++++++++++++++++ src/fu_util/fm_util.h | 237 ++++ src/fu_util/fo_obj.h | 592 ++++++++++ src/fu_util/ft_ar_examples.h | 71 ++ src/fu_util/ft_array.inc.h | 590 ++++++++++ src/fu_util/ft_search.inc.h | 113 ++ src/fu_util/ft_sort.inc.h | 174 +++ src/fu_util/ft_ss_examples.h | 131 +++ src/fu_util/ft_util.h | 441 ++++++++ src/fu_util/fu_utils_cfg.h | 9 + src/fu_util/fu_utils_cfg.h.in | 9 + src/fu_util/impl/fo_impl.c | 1389 ++++++++++++++++++++++++ src/fu_util/impl/fo_impl.h | 798 ++++++++++++++ src/fu_util/impl/fo_impl2.h | 200 ++++ 
src/fu_util/impl/ft_impl.c | 593 ++++++++++ src/fu_util/impl/ft_impl.h | 480 ++++++++ src/fu_util/test/CMakeLists.txt | 28 + src/fu_util/test/array.c | 135 +++ src/fu_util/test/bsearch.c | 46 + src/fu_util/test/fuprintf.c | 26 + src/fu_util/test/obj1.c | 293 +++++ src/fu_util/test/qsort/qsort.inc.c | 249 +++++ src/fu_util/test/qsort/qsort_pg.inc.c | 21 + src/fu_util/test/qsort/sort_template.h | 436 ++++++++ src/fu_util/test/sort.c | 257 +++++ src/fu_util/test/sort_p.c | 271 +++++ src/pg_probackup.c | 2 + src/pg_probackup.h | 1 + src/utils/logger.c | 103 +- src/utils/logger.h | 3 + 34 files changed, 8652 insertions(+), 40 deletions(-) create mode 100644 src/fu_util/CMakeLists.txt create mode 100644 src/fu_util/LICENSE create mode 100644 src/fu_util/README.obj.ru.md create mode 100644 src/fu_util/fm_util.h create mode 100644 src/fu_util/fo_obj.h create mode 100644 src/fu_util/ft_ar_examples.h create mode 100644 src/fu_util/ft_array.inc.h create mode 100644 src/fu_util/ft_search.inc.h create mode 100644 src/fu_util/ft_sort.inc.h create mode 100644 src/fu_util/ft_ss_examples.h create mode 100644 src/fu_util/ft_util.h create mode 100644 src/fu_util/fu_utils_cfg.h create mode 100644 src/fu_util/fu_utils_cfg.h.in create mode 100644 src/fu_util/impl/fo_impl.c create mode 100644 src/fu_util/impl/fo_impl.h create mode 100644 src/fu_util/impl/fo_impl2.h create mode 100644 src/fu_util/impl/ft_impl.c create mode 100644 src/fu_util/impl/ft_impl.h create mode 100644 src/fu_util/test/CMakeLists.txt create mode 100644 src/fu_util/test/array.c create mode 100644 src/fu_util/test/bsearch.c create mode 100644 src/fu_util/test/fuprintf.c create mode 100644 src/fu_util/test/obj1.c create mode 100644 src/fu_util/test/qsort/qsort.inc.c create mode 100644 src/fu_util/test/qsort/qsort_pg.inc.c create mode 100644 src/fu_util/test/qsort/sort_template.h create mode 100644 src/fu_util/test/sort.c create mode 100644 src/fu_util/test/sort_p.c diff --git a/Makefile b/Makefile index 
f32b2f94d..3753d9cb7 100644 --- a/Makefile +++ b/Makefile @@ -37,6 +37,7 @@ PROGRAM := pg_probackup # pg_probackup sources OBJS := src/utils/configuration.o src/utils/json.o src/utils/logger.o \ src/utils/parray.o src/utils/pgut.o src/utils/thread.o src/utils/remote.o src/utils/file.o +OBJS += src/fu_util/impl/ft_impl.o src/fu_util/impl/fo_impl.o OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o src/data.o \ src/delete.o src/dir.o src/fetch.o src/help.o src/init.o src/merge.o \ src/parsexlog.o src/ptrack.o src/pg_probackup.o src/restore.o src/show.o src/stream.o \ @@ -87,7 +88,8 @@ endif undefine PG_MAJORVER # -PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -I$(top_pbk_srcdir)/src -I$(BORROW_DIR) +PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -I$(top_pbk_srcdir)src -I$(BORROW_DIR) +PG_CPPFLAGS += -I$(top_pbk_srcdir)src/fu_util -Wno-declaration-after-statement ifdef VPATH PG_CPPFLAGS += -Isrc endif diff --git a/gen_probackup_project.pl b/gen_probackup_project.pl index 8143b7d0d..c912329fa 100644 --- a/gen_probackup_project.pl +++ b/gen_probackup_project.pl @@ -214,6 +214,7 @@ sub build_pgprobackup $probackup->AddIncludeDir("$currpath"); $probackup->AddIncludeDir("$currpath/src"); $probackup->AddIncludeDir("$currpath/src/utils"); + $probackup->AddIncludeDir("$currpath/src/fu_util"); if ($libpgfeutils) { diff --git a/src/fu_util/CMakeLists.txt b/src/fu_util/CMakeLists.txt new file mode 100644 index 000000000..f43152003 --- /dev/null +++ b/src/fu_util/CMakeLists.txt @@ -0,0 +1,44 @@ +cmake_minimum_required(VERSION 3.16) +project(fu_utils VERSION 0.1 LANGUAGES C) + +set(CMAKE_C_STANDARD 99) +set(CMAKE_C_EXTENSIONS true) + +include(CheckCSourceCompiles) + +add_library(fu_utils impl/ft_impl.c impl/fo_impl.c) + +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) +target_link_libraries(fu_utils PRIVATE Threads::Threads) + +# Detect for installed beautiful https://github.com/ianlancetaylor/libbacktrace 
+include_directories(.) +find_library(LIBBACKTRACE backtrace) +if(LIBBACKTRACE) + set(CMAKE_REQUIRED_LIBRARIES backtrace) + check_c_source_compiles(" + #include + int main(void) { + struct backtrace_state *st = backtrace_create_state(NULL, 0, NULL, NULL); + return 0; + } + " HAVE_LIBBACKTRACE) + if (HAVE_LIBBACKTRACE) + target_compile_definitions(fu_utils PRIVATE HAVE_LIBBACKTRACE) + endif() +endif() + +configure_file(fu_utils_cfg.h.in fu_utils_cfg.h) +target_include_directories(fu_utils INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") +target_include_directories(fu_utils PRIVATE "${PROJECT_BINARY_DIR}") +target_link_libraries(fu_utils PUBLIC backtrace) + +install(TARGETS fu_utils DESTINATION lib) +install(FILES fm_util.h ft_util.h fo_obj.h + ft_sort.inc.h ft_ss_examples.h ft_search.inc.h ft_array.inc.h + ft_ar_examples.h "${PROJECT_BINARY_DIR}/fu_utils_cfg.h" + DESTINATION include/fu_utils) +install(FILES impl/ft_impl.h impl/fo_impl.h DESTINATION include/fu_utils/impl) + +add_subdirectory(test) \ No newline at end of file diff --git a/src/fu_util/LICENSE b/src/fu_util/LICENSE new file mode 100644 index 000000000..d59b83d19 --- /dev/null +++ b/src/fu_util/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2022-2022 Postgres Professional +Copyright (c) 2022-2022 Yura Sokolov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/src/fu_util/README.obj.ru.md b/src/fu_util/README.obj.ru.md new file mode 100644 index 000000000..78b8d142b --- /dev/null +++ b/src/fu_util/README.obj.ru.md @@ -0,0 +1,923 @@ +# Интерфейсно-Объектная библиотечка fobj - funny objects. + +Библиотечка призвана предоставить решение проблеме полиформизма: +- иметь множество реализаций одного поведения (методов), +- и прозрачное инициирование поведения не зависимо от реализации (вызов + метода). + +Реализует концепцию динамического связывания: +- объект аллоцируется со скрытым хедером, содержащим идентификатор класса +- во время вызова метода, реализация метода ищется в рантайме используя + идентификатор класса из хедера объекта и идентификатор метода. + +Плюс, библиотека предоставляет управление временем жизни объектов +посредством счётчика ссылок. + +### Пример + +Рассмотрим на примере создания объекта с состоянием в виде переменной +double и паре методов: "умножить и прибавить" и "прибавить и умножить". 
+
+Без "магии" это могло выглядеть так:
+```c
+    typedef struct dstate { double st; } dstate;
+    double mult_and_add(dstate *st, double mult, double add) {
+        st->st = st->st * mult + add;
+        return st->st;
+    }
+
+    double add_and_mult(dstate *st, double add, double mult) {
+        st->st = (st->st + add) * mult;
+        return st->st;
+    }
+
+    int main (void) {
+        dstate mystate = { 1 };
+        printf("%f\n", mult_and_add(&mystate, 2, 1));
+        printf("%f\n", add_and_mult(&mystate, 2, 1));
+        printf("%f\n", mult_and_add(&mystate, 5, 8));
+        printf("%f\n", add_and_mult(&mystate, 5, 8));
+    }
+```
+
+## Метод
+
+Метод - главный персонаж библиотеки. Определяет поведение.
+
+Метод - "дженерик" функция с диспатчингом по первому аргументу.
+Первый аргумент не типизирован. Им может быть "произвольный" объект.
+Типы второго и следующего аргумента фиксированные.
+
+Аргументы могут быть "обязательными" и "опциональными". У опционального
+аргумента может быть значение по умолчанию.
+
+Диспатчинг происходит в рантайме. Если обнаружится, что на объекте
+метод не определён, произойдёт FATAL ошибка с абортом. Если обязательный
+аргумент не передан, тоже.
+
+Имена методов строго уникальны, потому именовать метод рекомендуется
+с аббревиатурой неймспейса. Также рекомендуется использовать
+theCamelCase.
+
+Чтобы создать метод, нужно:
+
+- объявить имя, сигнатуру метода.
+  Обычно это делается в заголовочных файлах (.h)
+  Первый аргумент (объект) явно объявлять не нужно.
+```c
+    #define mth__hotMultAndAdd double, (double, mult), (double, add)
+    #define mth__hotAddAndMult double, (double, add), (double, mult)
+    #define mth__hotGetState double
+```
+- если есть опциональные аргументы, то добавить их объявление.
+ (если нет, то макрос объявлять не нужно) +```c + #define mth__hotMultAndAdd_optional() (mult, 1), (add, 0) +``` +- позвать генерирующий макрос (так же в .h файле, если это метод должен + быть виден из-вне) +```c + fobj_method(hotMultAndAdd); + fobj_method(hotAddAndMult); + fobj_method(hotGetState); +``` + + Макрос генерит следующие объявления, используемые пользователем: + +```c + // Функция вызова метода без "магии" именованных/опциональных + // параметров. + static inline double hotMultAndAdd(fobj_t obj, double mult, double add); + + // Интерфейс для одного метода + typedef union hotMultAndAdd_i { + fobj_t self; + uintptr_t has_hotMultAndAdd; + } hotMultAndAdd_i; + + // Биндинг интерфейса для объекта + static inline hotMultAndAdd_i bind_hotMultAndAdd(fobj_t obj); + + // Биндинг интерфейса + увеличение счётчика ссылок на объекте. + static inline hotMultAndAdd_i bindref_hotAddAndMult(fobj_t obj); + + // Проверка реализации интерфейса + static inline bool implements_hotAddAndMult(fobj_t obj, hotMultAndAdd_i *iface); +``` + + Последующие объявления пользователем непосредственно не используются. + Можете не читать. 
+ +```c + // Хэндл метода + static inline fobj_method_handle_t hotMultAndAdd__mh(void); + + // Тип функции - реализации + typedef double (*hotMultAndAdd__impl)(fobj_t obj, double mult, double mult); + + // Связанная реализация + typedef struct hotMultAndAdd_cb { + fobj_t self; + hotMultAndAdd_impl impl; + } hotMultAndAdd_cb; + + // Получение связанной реализации + static inline hotMultAndAdd_cb + fobj_fetch_hotMultAndAdd(fobj_t obj, fobj_klass_handle_t parent); + + // Регистрация реализации + static inline void + fobj__register_hotMultAndAdd(fobj_klass_handle_t klass, hotMultAndAdd_impl impl); + + // Валидация существования метода для класса + // (В случае ошибки, будет FATAL) + static inline void + fobj__klass_validate_hotMultAndAdd(fobj_klass_handle_t klass); + + // Тип для именованных параметров + typedef struct hotMultAndAdd__params_t { + fobj__dumb_t _dumb_first_param; + + double mult; + fobj__dumb_t mult__given; + + double add; + fobj__dumb_t add__given; + } hotMultAndAdd__params_t; + + // Функция вызова связанной реализации с параметрами. + static inline double + fobj__invoke_hotMultAndAdd(hotMultAndAdd__cb cb, hotMultAndAdd__params params); +``` + +------ + +## Класс + +Класс определяет связывание методов с конкретными объектами. 
+ +Объявляя класс, можно указать: +- список методов +- родителя (опционально) +- интерфейсы, которые нужно провалидировать (опционально) +- расширяемая ли аллокация (опционально) + +Чтобы создать класс, нужно: +- объявить тело класса +```c + typedef struct hotDState { + double st; + } hotDState; + + // И "наследника" + typedef struct hotCState { + hotDState p; // parent + double add_acc; // аккумулятор для слагаемых + } hotCState; +``` + +- обявить сигнатуру класса +```c + #define kls__hotDState mth(hotMultAndAdd, hotGetState), \ + iface(hotState) + #define kls__hotCState inherits(hotDState), \ + mth(hotMultAndAdd, hotAddAndMult), + iface(hotState) +``` + Примечания: + - мы не объявили hotAddAndMult на hotDState, но он удовлетворяет + hotState, т.к. hotAddAndMult не обязателен. + - мы не объявляли hotGetState на hotCState, но он удовлетворяет + hotState, т.к. этот метод он наследует от hotDState. + +- позвать генерирующие макросы +```c + fobj_klass(hotDState); + fobj_klass(hotCState); +``` + На самом деле, это всего лишь генерит методы хэндл-ов. 
+```c + extern fobj_khandle_method_t hotDState__kh(void); + extern fobj_khandle_method_t hotCState__kh(void); +``` + +- объявить реализации методов: +```c + static double + hotDState_hotMultAndAdd(VSelf, double mult, double add) { + Self(hotDState); + self->st = self->st * mult + add; + return self->st; + } + + static double + hotDState_hotGetState(VSelf) { + Self(hotDState); + return self->st; + } + + static double + hotCState_hotMultAndAdd(VSelf, double mult, double add) { + Self(hotCState); + // Вызов метода на родителе + $super(hotMultAndAdd, self, .mult = mult, .add = add); + self->add_acc += add; + return self->st; + } + + static double + hotCState_hotAddAndMult(VSelf, double add, double mult) { + Self(hotCState); + $(hotMultAndAdd, self, .add = add); + $(hotMultAndAdd, self, .mult = mult); + return self->st; + } +``` + +- После всех реализаций (или хотя бы их прототипов) в одном .c файле нужно создать реализацию хэндла класса: +```c + fobj_klass_handle(hotDState); + fobj_klass_handle(hotCState); +``` + +- Опционально, можно инициализировать класс. + Этот шаг не обязателен чаще всего, но может потребоваться, если вы собираетесь + заморозить рантайм (`fobj_freeze`), или захотите поискать класс по имени. +```c + void + libarary_initialization(void) { + /*...*/ + fobj_klass_init(hotDState); + fobj_klass_init(hotCState); + /*...*/ + } +``` + +### Деструктор + +Когда объект уничтожается, выполняется стандартный метод `fobjDispose` +(если определён на объекте). +```c + typedef struct myStore { + fobj_t field; + } myStore; + + #define kls__myKlass mth(fobjDispose) + + static void + myKlass_fobjDispose(VSelf) { + Self(myKlass); + $del(&self->field); + } +``` + +Методы `fobjDispose` вызываются для всех классов в цепочке наследования, +для которых они определены, по порядку *от потомков к первому родителю*. +Т.е. сперва вызвается `fobjDispose` вашего класса, потом его +непосредственного родителя, потом родителя родителя и т.д. 
+ +Нет способа вернуть ошибку из `fobjDispose` (т.к. не куда). Он обязан +сам со всем разобраться. + +Иногда нужно "отключить объект" не дожидаясь, пока он сам уничтожится. +Т.е. хочется позвать `fobjDispose`. Но явный вызов `fobjDispose` запрещён. +Для этого нужно воспользоваться обёрткой `fobj_dispose`. + +Обёртка гарантирует, что `fobjDispose` будет позван только один раз. +Кроме того, она запоминает, что вызов `fobjDispose` завершился, и после +этого любой вызов метода (вызванный без хаков) на этом объекте будет +падать в FATAL. + +### Методы класса + +Долго думал, и решил, что нет проблем, решаемых методами класса, +и не решаемых другими методами. + +Методы класса играют обычно роли: +- неймспейса для статических функций. + - Но в С можно просто звать глобальные функции. +- синглтон объектов, связанных с множеством объектов. + - Если объекту очень нужен синглтон, можно объявить метод, + возвращающий такой синглтон. Но большинство объектов не требует + связанного с ним синглтона. +- фабрик для создания объектов + - Что не отличается от кейса со статическими функциями. + +В общем, пока не появится очевидная необходимость в методах класса, +делать их не буду. Ибо тогда потребуется создавать мета-классы и их +иерархию. Что будет серьёзным усложнением рантайма. + +### Конструктор + +И тут вероятно вы зададите мне вопрос: +- Эй, уважаемый, что за омлет? А где же яйца? + (с) Дискотека Авария. + +В смысле: до сих пор ни слова про конструкторы. + +И не будет. Применяется подход "Конструктор бедного человека": +- конструированием объекта должен заниматься не объект. +- объект конструирует либо глобальная функция, либо метод другого объекта. + - либо всё нужное для работы объекта готовится перед аллокацией и + передаётся непосредственно в значения полей в момен аллокации, + - либо у объекта есть некий метод `initializeThisFu__ingObject` + который зовётся после аллокации. 
+ (название выдуманно, такого стандартного метода нет), + +Этот подход применён в Go, и в целом, его можно понять и принять. +Сделать семантику конструктора с корректной обработкой возможных ошибок, +вызова родительского конструктора и прочим не просто. И не сказать, +чтобы языки, имеющие конструкторы, справляются с ними без проблем. + +В библиотечке так же наложились проблемы: +- с сохранением где-нибудь имени метода(?) или функции(?) конструктора + и передачи ему параметров. +- перегрузки конструкторов в зависимости от параметров(?) +- необходимость уникальных имён методов для каждого набора параметров. +- необходимость куда-то возвращать ошибки? +- отсутствие методов класса. + +В общем, пораскинув мозгами, я решил, что простота Go рулит, и усложнять +это место не особо нужно. +Тем более, что зачастую объекты создаются в методах других объектах. + +----- + +## Объекты + +Объекты - это экземпляры/инстансы классов. + +### Aллокация. +```c + hotDState* dst; + hotCState* cst; + + // По умолчанию, аллоцируется, зачищенное нулями. + dst = $alloc(DState); + + // Но можно указать значения. + cst = $alloc(CState, .p.st = 100, .add_acc = 0.1); +``` + +На что нужно обратит внимание: +- если вы пользуетесь передачей начальных значений в `$alloc` +- и освобождаете что-то в `fobjDispose` +- то передавать в `$alloc` нужно то, что можно в `fobjDispose` освободить. + +Т.е. +- если вы передаёте объект, то его нужно `$ref(obj)` (см.ниже) +- если вы передаёте строку, то её нужно `ft_strdup(str)` +- и т.д. + +### Вызов метода + +В вызове метода можно указывать аргументы по порядку или используя +имена параметров. + +Опциональные параметры можно пропускать. После пропущенного опционального +параметра можно использовать только именованные параметры. 
+ +```c + // "Классический" + printf("%f\n", $(hotMultAndAdd, dst, 2, 3)); + printf("%f\n", $(hotMultAndAdd, cst, 3, 4)); + printf("%f\n", $(hotGetState, dst)); + printf("%f\n", $(hotGetState, cst)); + printf("%f\n", $(hotAddAndMult, cst, 5, 6)); + + // С именованными параметрами + printf("%f\n", $(hotMultAndAdd, dst, .mult = 2, .add = 3)); + printf("%f\n", $(hotMultAndAdd, cst, .add = 3, .mult = 4)); + printf("%f\n", $(hotGetState, dst)); // нет параметров. + printf("%f\n", $(hotAddAndMult, cst, .add = 5, .mult = 6)); + printf("%f\n", $(hotAddAndMult, cst, .mult = 5, .add = 6)); + + // С дефолтными параметрами + printf("%f\n", $(hotMultAndAdd, dst, .mult = 2)); + printf("%f\n", $(hotMultAndAdd, cst, .add = 3)); + printf("%f\n", $(hotMultAndAdd, cst)); + // А вот это упадёт с FATAL, т.к. у hotAddAndMult не имеет + // опциональных аргументов + // printf("%f\n", $(hotAddAndMult, cst, .add = 5)); + // printf("%f\n", $(hotAddAndMult, cst, .mult = 5)); + // printf("%f\n", $(hotAddAndMult, cst)); +``` + +Можно использовать метод непосредственно как С функцию, но аргументы +придётся тогда указывать все и по порядку. Именнованные аргументы +при этом указать не получится, и пропустить опциональные - тоже. + +```c + printf("%f\n", hotMultAndAdd(dst, 2, 3)); + printf("%f\n", hotMultAndAdd(cst, 3, 4)); + printf("%f\n", hotGetState(dst)); + printf("%f\n", hotGetState(cst)); + printf("%f\n", hotAddAndMult(cst, 5, 6)); + // а вот это свалится с FATAL + // printf("%f\n", hotAddAndMult(dst, 6, 7)); +``` + +### Условные вызов метода. + +Доступна конструкция вызова метода только в случае, если он определён: + +```c + double v; + if ($ifdef(v =, hotMultAndAdd, dst, .mult = 1)) { + printf("dst responds to hotMultAndAdd: %f\n", v); + } + + if ($ifdef(, hotGetStatus, cst)) { + printf("cst responds to hotGetStatus.\n" + "Result assignment could be ommitted. 
" + "Although compiler could warn on this."); + } +``` + +### Проверка реализации метода + +Можно проверить, определён ли метод на объекте, с помощью макроса +`$implement(Method, obj)` + +```c + if ($implements(hotGetState, dst)) { + workWithObject(dst); + } + + hotGetState_i hgs; + if ($implements(hotGetState, dst, &hgs)) { + $i(hotGetState, hgs); + } +``` + +(На самом деле, используется механизм определения реализации интерфейса, +сгенерённого для метода). + +------- + +## Интерфейс + +Интерфейс - это формальный набор методов. + +Служит для цели проверки согласованности реализации объекта/класса, и +улучшает самодокументируемость сигнатур методов. + +Для каждого метода сразу создаётся интерфейс, содержащий один обязательный +метод. Потому создавать ещё раз интерфейс для одного метода не требуется. + +Чтобы создать интерфейс, нужно: + +- объявить интерфейс +```c + #define iface__hotState mth(hotMultAndAdd, hotGetState), \ + opt(hotAddAndMult) +``` + Здесь мы объявили два обязательных и один опциональный метод. + Количество секций mth и opt - произвольное. Количество методов в них - + тоже. + (Произвольное - в пределах разумного - ~ до 16) + +- позвать генерирующий макрос +```c + fobj_iface(hotState); +``` + + Макрос генерирует объявления: +```c + // Структура интерфейса с реализациями методов. 
+ typedef union hotState_i { + fobj_t self; + uintptr_t has_hotMultAndAdd; + uintptr_t has_hotGetState; + uintptr_t has_hotAddAndMult; + } hotState_i; + + // Биндинг интерфейса для объекта + static inline hotState_i bind_hotState(fobj_t obj); + // Биндинг интерфейса + увеличение счётчика ссылок на объекте + static inline hotState_i bindref_hotState(fobj_t obj); + // Проверка реализации интерфейса + static inline bool implements_hotState(fobj_t obj, hotState_i *iface); +``` + И "скрытое объявление" +``` + // Проверка объявления интерфейса + static inline void + fobj__klass_validate_hotState(fobj__klass_handle_t klass); +``` + +### Биндинг метода/интерфейса + +По сути, это всегда биндинг интерфейса. Просто каждый метод определяет +интерфейс с одним этим методом. + +```c + hotMultAndAdd_i hmad = bind_hotMultAndAdd(dst); + hotMultAndAdd_i hmac = bind_hotMultAndAdd(cst); + hotState_i hstd = bind_hotState(dst); + hotState_i hstc = bind_hotState(cst); +``` + +### Вызов метода на интерфейсе + +Заметьте, тут интерфейс передаётся по значению, а не по поинтеру. +Сделал так после того, как один раз ошибся: вместо `$i()` написал `$()`, +и компилятор радостно скомпилировал, т.к. `$()` принимает `void*`. + +```c + printf("%f\n", $i(hotMultAndAdd, hmaa, .mult = 1)); + printf("%f\n", $i(hotMultAndAdd, hmac, .add = 2)); + + printf("%f\n", $i(hotMultAndAdd, hstd)); + printf("%f\n", $i(hotMultAndAdd, hstc)); + printf("%f\n", $i(hotGetState, hstd)); + printf("%f\n", $i(hotGetState, hstc)); + + printf("%f\n", $i(hotAddAndMult, hstc, .mult = 4, .add = 7)); + // Проверка на обязательность аргументов тут работает так же. + // Потому след.вызовы упадут с FATAL: + // $i(hotAddAndMult, hstd, .mult = 1); + // $i(hotAddAndMult, hstd, .add = 1); + // $i(hotAddAndMult, hstd); +``` + +A вот на `hstd` так просто `hotAddAndMult` позвать нельзя: +- `hotDState` этот метод не определял +- `hotAddAndMult` является опциональным методом интерфейса +- потому в `hstd` этот метод остался не заполненным. 
+Нужно проверять: + +```c + if ($ifilled(hotAddAndMult, hstd)) { + printf("This wont be printed: %f\n", + $i(hotAddAndMult, hstd, .mult=1, .add=2)); + } + if (fobj_iface_filled(hotAddAndMult, hstd)) { /*...*/ } +``` + +Или воспользоваться условным вызовом метода: +```c + if ($iifdef(v =, hotAddAndMult, hstd, .mult = 1, .add = 2)) { + printf("This wont be printed: %f\n", v); + } +``` + +### Проверка реализации интерфейса + +Вызов `bind_someInterface` упадёт с FATAL, если интерфейс не реализован. + +Проверить, реализован ли интерфейс, можно с помощью `$implements()`: + +```c + if ($implements(hotState, dst)) { + workWithObject(dst); + } + + if ($implements(hotState, dst, &hstd)) { + $i(hotGetState, hstd); + } +``` + +### Накладные расходы. + +Интерфейс служит только для типизации, и реализован в виде union размером в 1 +поинтер. В целом, накладные расходы не отличаются от поинтера на объект. + +-------- + +## Время жизни объекта. + +Время жизни объекта управляется методом подсчёта ссылок. + +Когда счётчик доходит до 0, объект уничтожается (и вызывается его fobjDispose, если определён). + +### Счётчик ссылок + +#### Инкремент счётчика ссылок + +Если объект `a` начинает ссылается на объект `b`, то счётчик ссылок `b` +должен быть увеличен на 1. Для этого служит `$ref(fobj)`/`fobj_ref(obj)`. + +```c + // Увеличить счётчик на объекте. + + store->field = $ref(obj); + + // Увеличить счётчик на объекте, возвращённом из метода. + + store->field = $ref($(createObject, fabric)); + +``` + +То же самое, когда вы аллоцируете объект и передаёте ему ссылку на другой +объект + +```c + store = $alloc(Store, .field = $ref(obj)); + + store = $alloc(Store, .field = $ref($(createObject, fabric))); +``` + +#### Декремент счётчика ссылок + +Если объект `a` перестаёт ссылаться на объект `b`, то нужно уменьшить +счётчик ссылок `b` на 1. 
+
+```c
+    $del(&store->field);
+```
+
+То же нужно делать и в деструкторе (`fobjDispose`):
+
+```c
+    void
+    Store_fobjDispose(VSelf) {
+        Self(Store);
+        $del(&self->field);
+    }
+```
+
+#### Корректная перезапись ссылки
+
+Если требуется переписать ссылку объекта `a` с объекта `b1` на объект
+`b2`, то нужно не забыть декремент счётчика на `b1`. Но это нужно сделать
+после инкремента счётчика на `b2`, т.к. это может быть один и тот же
+объект.
+
+Чтобы избежать ошибок, используйте:
+
+```c
+    $set(&store->field, obj);
+
+    $set(&store->field, $(createObject, fabric));
+```
+
+Заметьте: явно звать `$ref` или `$del` *не нужно*.
+
+### AutoRelease Pool (ARP)
+
+Для облегчения жизни программиста, используется концепция AutoRelease Pool:
+- в начале крупных функций и в теле долгих циклов нужно объявить пул.
+  Также можно объявить пул для блока, если нужно ограничить время жизни
+  объектов.
+```c
+    fobj_t
+    longRunningFunction()
+    {
+        FOBJ_FUNC_ARP();
+        /*...*/
+        for (i = 0; i < 1000000000; i++) {
+            FOBJ_LOOP_ARP();
+            /*...*/
+        }
+        /*...*/
+        {
+            FOBJ_BLOCK_ARP();
+            /*...*/
+        }
+    }
+```
+- AutoRelease Pool очищается при выходе из скоупа (функции, блока, одной
+  итерации цикла). Для этого используется расширение GCC (поддержанное
+  так же clang, tcc, lcc (Эльбрус), и возможно другими)
+  `__attribute__((cleanup(func)))`.
+  При этом к каждому объекту, помещённому в ARP, применяется `$del`
+  столько раз, сколько раз объект помещён в пул.
+
+Все вновь созданные объекты имеют refcnt = 1 и уже помещены в "ближайший"
+ARP. Если к новому объекту не применено `$ref`, то он будет уничтожен.
+
+#### Разлинковка объектов
+
+Если требуется вернуть объект `b`, с которым объект `a` теряет связь,
+то нужно не делать декремент счётчика, а помещать объект в ARP.
+
+Для этого служит `$unref(b)`:
+
+```c
+    fobj_t b = store->field;
+    store->field = NULL;
+    return $unref(b);
+```
+
+Для перезаписи ссылки и возврата предыдущего значения служит `$swap`.
+При этом возвращаемое значение помещается в ARP:
+
+```c
+    return $swap(&store->field, b2);
+```
+
+Этим же удобно пользоваться для однострочной разлинковки:
+
+```c
+    return $swap(&store->field, NULL);
+```
+
+#### Спасение объекта
+
+Как уже говорилось, при выходе из скоупа, для которого объявлен
+ARP, объект может быть уничтожен (т.к. счётчик ссылок у него станет
+равным 0).
+
+Можно "спасать" объект, вручную увеличивая счётчик ссылок с помощью
+`$ref(obj)`, и потом помещая в ARP пул другого скоупа с помощью
+`$unref(obj)`.
+
+Но для удобства сделаны макросы `$save(obj)`, `$result(obj)` и
+`$return(obj)`.
+
+`$save(obj)` сохраняет объект в случае выхода из блока/цикла. Он берёт
+пул, объявленный с помощью `FOBJ_BLOCK_ARP()` или `FOBJ_LOOP_ARP()`,
+узнаёт из него ссылку на предыдущий, и сохраняет объект в этом предыдущем
+ARP пуле (предварительно сделав инкремент счётчика ссылок).
+
+`$result(obj)` делает то же самое, но с пулом, объявленным в скоупе
+функции с помощью `FOBJ_FUNC_ARP()`. Таким образом, объект можно будет
+передать в качестве результата, не опасаясь, что он будет тут же
+уничтожен.
+
+`$return(obj)` разворачивается просто в `return $result(obj)`.
+ +#### Интерфейсы + +Такие же макросы есть для работы с интерфейсами: +- `$iref(iface)` +- `$iunref(iface)` +- `$iset(&iface_var, iface)` +- `$iswap(&iface_var, iface)` +- `$idel(&iface_var)` +- `$isave(iface)` +- `$iresult(iface)` +- `$ireturn(iface)` + +### Пример + +#### Cвязи между объектами + +```c + typedef struct myKlass2 { + fobj_t someObj; + char* someStr; + } myKlass2; + + #define mth__setSomeObj void, (fobj_t, so) + fobj_method(setSomeObj) + + #define mth__delSomeObj void + fobj_method(delSomeObj) + + #define mth__popSomeObj fobj_t + fobj_method(popSomeObj) + + #define mth__replaceSomeObj void, (fobj_t, so) + fobj_method(replaceSomeObj) + + #define mth__setSomeStr void, (char*, ss) + fobj_method(setSomeStr) + + #define kls__myKlass2 mth(fobjDispose), \ + mth(setSomeObj, delSomeObj, popSomeObj, replaceSomeObj), \ + mth(setSomeStr) + fobj_klass(MyKlass2) + + /* Корректно освобождаем ресурсы */ + myKlass2_fobjDispose(VSelf) { + Self(myKlass2); + $del(&self->someObj); + ft_free(self->someStr); + } + + void + myKlass2_setSomeObj(VSelf, fobj_t so) { + Self(myKlass2); + $set(&self->someObj, so); + } + + void + myKlass2_delSomeObj(VSelf) { + Self(myKlass2); + $del(&self->someObj); + } + + void + myKlass2_popSomeObj(VSelf) { + Self(myKlass2); + return $swap(&self->someObj, NULL); + // Or + // fobj_t old = self->someObj; + // self->someObj = NULL; + // return $unref(old); + } + + void + myKlass2_replaceSomeObj(VSelf, fobj_t so) { + Self(myKlass2); + return $swap(&self->someObj, so); + // Or + // fobj_t old = self->someObj; + // self->someObj = $ref(so); + // return $unref(old); + } + + myKlass2_resetSomeObj(VSelf, fobj_t so) { + Self(myKlass2); + const char *old = self->someStr; + $set(&self->someObj, so); + self->someStr = ft_strdup(ss); + ft_free(old); + } + + myKlass2* + make_MyKlass2(fobj_t so, char *ss) { + return $alloc(myKlass2, + .someObj = $ref(so), + .someStr = ft_strdup(ss)); + } + + myKlass2* + make_set_MyKlass2(fobj_t so, char *ss) { + MyKlass2* mk = 
$alloc(myKlass2); + mk->someObj = $ref(so); + mk->someStr = ft_strdup(ss); + return mk; + } +``` + +#### Работа с ARP пулом + +```c + // Нужно вернуть объект + fobj_t + doSomethingAndReturn(/*...*/, fobjErr **err) { + FOBJ_FUNC_ARP(); // AutoRelease Pool для функции + fobj_t result; + fobj_t loop_result = NULL; + // Проверим, что err != NULL, и присвоим *err = NULL + fobj_reset_err(err); + + for(/*...*/) { + FOBJ_LOOP_ARP(); // AutoRelease Pool для каждой итерации цикла + fobj_t some = findsomewhere(/*...*/); + + if (isGood(some)) { + // Если не сделать $save(some), то он (возможно) + // уничтожится при выходе из цикла. + loop_result = $save(some); + break; + } + if (tooBad(some)) { + // нужно "вернуть" err + *err = fobj_error("SHIT HAPPENED"); + // Без этого *err будет уничтожен при выходе из функции + $result(*err); + return NULL; + } + } + + result = createKlass(loop_result); + $return(result); + + // Если сделать просто `return result`, то объект уничтожится + // при выходе из функции. + } +``` + +Для быстрого выхода из вложенных ARP пулов можно вручную позвать +`$ref` + `$unref`: + +```c + fobj_t + doSomethingAndReturn(/*...*/) { + FOBJ_FUNC_ARP(); // AutoRelease Pool для функции + fobj_t result; + fobj_t loop_result = NULL; + + { + FOBJ_BLOCK_ARP(); + /*...*/ + for (/*...*/) { + FOBJ_LOOP_ARP(); + /*...*/ + if (/*...*/) { + loop_result = $ref(some); + goto quick_exit; + } + } + } + quick_exit: + // Не забыть поместить в ARP + $unref(loop_result) + /*...*/ + } +``` + +## Инициализация + +В главном исполняемом файле где-нибудь в начале функции `main` нужно позвать: +```c + fobj_init(); +``` +До этого момента создание новых классов (`fobj_klass_init`) будет падать с +FATAL ошибкой. + +Метод подготавливает рантайм и определяет некоторые базовые классы и методы. 
diff --git a/src/fu_util/fm_util.h b/src/fu_util/fm_util.h new file mode 100644 index 000000000..5c5691512 --- /dev/null +++ b/src/fu_util/fm_util.h @@ -0,0 +1,237 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=0 */ +#ifndef FM_UTIL_H +#define FM_UTIL_H + +#define fm_cat_impl(x, y) x##y +#define fm_cat(x, y) fm_cat_impl(x, y) +#define fm_cat3_impl(x, y, z) x##y##z +#define fm_cat3(x, y, z) fm_cat3_impl(x, y, z) +#define fm_cat4_impl(w, x, y, z) w##x##y##z +#define fm_cat4(w, x, y, z) fm_cat4_impl(w, x, y, z) +#define fm_str_impl(...) #__VA_ARGS__ +#define fm_str(...) fm_str_impl(__VA_ARGS__) +#define fm_uniq(x) fm_cat(_##x##_, __COUNTER__) + +#define fm_expand(...) __VA_ARGS__ +#define fm_empty(...) + +#define fm_compl(v) fm_cat(fm_compl_, v) +#define fm_compl_0 1 +#define fm_compl_1 0 +#define fm_and(x, y) fm_cat3(fm__and_, x, y) +#define fm__and_00 0 +#define fm__and_01 0 +#define fm__and_10 0 +#define fm__and_11 1 +#define fm_or(x, y) fm_cat3(fm__or_, x, y) +#define fm__or_00 0 +#define fm__or_01 1 +#define fm__or_10 1 +#define fm__or_11 1 +#define fm_nand(x, y) fm_cat3(fm__nand_, x, y) +#define fm__nand_00 1 +#define fm__nand_01 1 +#define fm__nand_10 1 +#define fm__nand_11 0 +#define fm_nor(x, y) fm_cat3(fm__nor_, x, y) +#define fm__nor_00 1 +#define fm__nor_01 0 +#define fm__nor_10 0 +#define fm__nor_11 0 +#define fm_xor(x, y) fm_cat3(fm__xor_, x, y) +#define fm__xor_00 0 +#define fm__xor_01 1 +#define fm__xor_10 1 +#define fm__xor_11 0 + +#define fm_if(x, y, ...) fm_cat(fm__if_, x)(y, __VA_ARGS__) +#define fm_iif(x) fm_cat(fm__if_, x) +#define fm__if_1(y, ...) y +#define fm__if_0(y, ...) __VA_ARGS__ +#define fm_when(x) fm_cat(fm__when_, x) +#define fm__when_1(...) __VA_ARGS__ +#define fm__when_0(...) + +#define fm_va_comma(...) \ + fm_cat(fm__va_comma_, fm_va_01(__VA_ARGS__))() +#define fm__va_comma_0() +#define fm__va_comma_1() , + +#define fm_or_default(...) 
\ + fm_cat(fm__or_default_, fm_va_01(__VA_ARGS__))(__VA_ARGS__) +#define fm__or_default_0(...) fm_expand +#define fm__or_default_1(...) __VA_ARGS__ fm_empty + +#define fm__primitive_compare(x, y) fm_is_tuple(COMPARE_##x(COMPARE_##y)(())) +#define fm__is_comparable(x) fm_is_tuple(fm_cat(COMPARE_,x)(())) +#define fm_not_equal(x, y) \ + fm_iif(fm_and(fm__is_comparable(x),fm__is_comparable(y))) \ + (fm__primitive_compare, 1 fm_empty)(x, y) +#define fm_equal(x, y) \ + fm_compl(fm_not_equal(x, y)) + +#define fm_comma(...) , +#define fm__comma , +#define fm_va_single(...) fm__va_single(__VA_ARGS__, fm__comma) +#define fm_va_many(...) fm__va_many(__VA_ARGS__, fm__comma) +#define fm__va_single(x, y, ...) fm__va_result(y, 1, 0) +#define fm__va_many(x, y, ...) fm__va_result(y, 0, 1) +#define fm__va_result(x, y, res, ...) res + +#if !__STRICT_ANSI__ +#define fm_no_va(...) fm__no_va(__VA_ARGS__) +#define fm__no_va(...) fm_va_single(~, ##__VA_ARGS__) +#define fm_va_01(...) fm__va_01(__VA_ARGS__) +#define fm__va_01(...) fm_va_many(~, ##__VA_ARGS__) +#else +#define fm_no_va fm_is_empty +#define fm_va_01 fm_isnt_empty +#endif + +#define fm__is_tuple_choose(a,b,x,...) x +#define fm__is_tuple_help(...) , +#define fm__is_tuple_(...) fm__is_tuple_choose(__VA_ARGS__) +#define fm_is_tuple(x, ...) fm__is_tuple_(fm__is_tuple_help x, 1, 0) + +#define fm_head(x, ...) x +#define fm_tail(x, ...) __VA_ARGS__ + +#define fm_apply_1(macro, x, ...) \ + macro(x) +#define fm_apply_2(macro, x, y, ...) \ + macro(x, y) +#define fm_apply_3(macro, x, y, z, ...) \ + macro(x, y, z) +#define fm_apply_tuple_1(macro, x, ...) \ + macro x +#define fm_apply_tuple_2(macro, x, y, ...) \ + fm__apply_tuple_2(macro, x, fm_expand y) +#define fm__apply_tuple_2(macro, x, ...) \ + macro(x, __VA_ARGS__) + +#define fm_tuple_expand(x) fm_expand x +#define fm_tuple_tag(x) fm_head x +#define fm_tuple_data(x) fm_tail x +#define fm_tuple_0(x) fm_head x +#define fm_tuple_1(x) fm__tuple_1 x +#define fm__tuple_1(_0, _1, ...) 
_1 +#define fm_tuple_2(x) fm__tuple_2 x +#define fm__tuple_2(_0, _1, _2, ...) _2 + +#define fm__tuple_tag_or_0_choose(a,x,...) x +#define fm__tuple_tag_or_0_help(tag, ...) , tag +#define fm__tuple_tag_or_0_(...) fm__tuple_tag_or_0_choose(__VA_ARGS__) +#define fm_tuple_tag_or_0(x) fm__tuple_tag_or_0_(fm__tuple_tag_or_0_help x, 0) + +#define fm_dispatch_tag_or_0(prefix, x) \ + fm_cat(prefix, fm_tuple_tag_or_0(x)) + +#define fm_va_012(...) \ + fm_if(fm_no_va(__VA_ARGS__), 0, fm__va_12(__VA_ARGS__)) +#define fm__va_12(...) \ + fm_if(fm_va_single(__VA_ARGS__), 1, 2) + +// recursion handle +#define fm_defer(id) id fm_empty() +#define fm_recurs(id) id fm_defer(fm_empty)() + +#if __STRICT_ANSI__ +#define fm__is_emptyfirst(x, ...) fm_iif(fm_is_tuple(x))(0, fm__is_emptyfirst_impl(x)) +#define fm__is_emptyfirst_impl(x,...) fm_tuple_2((\ + fm__is_emptyfirst_do1 x (fm__is_emptyfirst_do2), 1, 0)) +#define fm__is_emptyfirst_do1(F) F() +#define fm__is_emptyfirst_do2(...) , +#define fm_is_empty(...) fm_and(fm__is_emptyfirst(__VA_ARGS__), fm_va_single(__VA_ARGS__)) +#define fm_isnt_empty(...) fm_nand(fm__is_emptyfirst(__VA_ARGS__), fm_va_single(__VA_ARGS__)) +#else +#define fm_is_empty fm_no_va +#define fm_isnt_empty fm_va_01 +#endif + +#define fm_eval(...) fm__eval_0(__VA_ARGS__) +#define fm_eval2(...) fm__eval_0(__VA_ARGS__) +#define fm_eval3(...) fm__eval_0(__VA_ARGS__) +#define fm_eval4(...) fm__eval_0(__VA_ARGS__) +#define fm_eval5(...) fm__eval_0(__VA_ARGS__) +#define fm__eval_0(...) fm__eval_1(fm__eval_1(fm__eval_1(fm__eval_1(__VA_ARGS__)))) +#define fm__eval_1(...) fm__eval_2(fm__eval_2(fm__eval_2(__VA_ARGS__))) +#define fm__eval_2(...) fm__eval_3(fm__eval_3(fm__eval_3(__VA_ARGS__))) +#ifdef FU_LONG_EVAL +#define fm__eval_3(...) fm__eval_4(fm__eval_4(fm__eval_4(__VA_ARGS__))) +#define fm__eval_4(...) __VA_ARGS__ +#else +#define fm__eval_3(...) __VA_ARGS__ +#endif + +#define fm_foreach(macro, ...) 
\ + fm_when(fm_va_01(__VA_ARGS__))( \ + fm_apply_1(macro, __VA_ARGS__) \ + fm_recurs(fm_cat) (fm_, foreach) (\ + macro, fm_tail(__VA_ARGS__) \ + ) \ + ) + +#define fm_foreach_arg(macro, arg, ...) \ + fm_when(fm_va_01(__VA_ARGS__))( \ + fm_apply_2(macro, arg, __VA_ARGS__) \ + fm_recurs(fm_cat) (fm_, foreach_arg) (\ + macro, arg, fm_tail(__VA_ARGS__) \ + ) \ + ) + +#define fm_catx(x, y) fm_cat_impl(x, y) +#define fm_foreach_comma(macro, ...) \ + fm_when(fm_va_01(__VA_ARGS__))( \ + fm_apply_1(macro, __VA_ARGS__\ + )fm_if(fm_va_single(__VA_ARGS__), , fm__comma)\ + fm_recurs(fm_catx) (fm_, foreach_comma) (\ + macro, fm_tail(__VA_ARGS__) \ + ) \ + ) + + +#define fm_foreach_tuple(macro, ...) \ + fm_when(fm_va_01(__VA_ARGS__))( \ + fm_apply_tuple_1(macro, __VA_ARGS__) \ + fm_recurs(fm_cat) (fm_, foreach_tuple) (\ + macro, fm_tail(__VA_ARGS__) \ + ) \ + ) + +#define fm_foreach_tuple_arg(macro, arg, ...) \ + fm_when(fm_va_01(__VA_ARGS__))( \ + fm_apply_tuple_2(macro, arg, __VA_ARGS__) \ + fm_recurs(fm_cat) (fm_, foreach_tuple_arg) (\ + macro, arg, fm_tail(__VA_ARGS__) \ + ) \ + ) + +#define fm_foreach_tuple_comma(macro, ...) \ + fm_when(fm_va_01(__VA_ARGS__))( \ + fm_apply_tuple_1(macro, __VA_ARGS__\ + )fm_if(fm_va_single(__VA_ARGS__), fm_empty(), fm__comma)\ + fm_recurs(fm_cat) (fm_, foreach_tuple_comma) (\ + macro, fm_tail(__VA_ARGS__) \ + ) \ + ) + + +#define fm_eval_foreach(macro, ...) \ + fm_eval(fm_foreach(macro, __VA_ARGS__)) + +#define fm_eval_foreach_arg(macro, arg, ...) \ + fm_eval(fm_foreach_arg(macro, arg, __VA_ARGS__)) + +#define fm_eval_tuples(macro, ...) \ + fm_eval(fm_foreach_tuple(macro, __VA_ARGS__)) + +#define fm_eval_tuples_arg(macro, arg, ...) \ + fm_eval(fm_foreach_tuple_arg(macro, arg, __VA_ARGS__)) + +#define fm_eval_tuples_comma(macro, ...) 
\ + fm_eval(fm_foreach_tuple_comma(macro, __VA_ARGS__)) + +#define fm__dumb_require_semicolon \ + struct __dumb_struct_declaration_for_semicolon + +#endif diff --git a/src/fu_util/fo_obj.h b/src/fu_util/fo_obj.h new file mode 100644 index 000000000..43fbf5c33 --- /dev/null +++ b/src/fu_util/fo_obj.h @@ -0,0 +1,592 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#ifndef FOBJ_OBJ_H +#define FOBJ_OBJ_H + +#include + +typedef void* fobj_t; + +#include +#include + +/* + * Pointer to "object*. + * In fact, it is just 'void *'. + */ +/* + * First argument, representing method receiver. + * Unfortunately, function couldn't have arbitrary typed receiver without issueing + * compiller warning. + * Use Self(Klass) to convert to concrete type. + */ +#define VSelf fobj_t Vself +/* + * Self(Klass) initiate "self" variable with casted pointer. + */ +#define Self(Klass) Self_impl(Klass) + +extern void fobj_init(void); +/* + * fobj_freeze forbids further modifications to runtime. + * It certainly should be called before additional threads are created. + */ +extern void fobj_freeze(void); + +#define fobj_self_klass 0 + +#include "./impl/fo_impl.h" + +/* Generate all method boilerplate. */ +#define fobj_method(method) fobj__define_method(method) + +/* + * Ensure method initialized. + * Calling fobj_method_init is not required, + * unless you want search method by string name or use `fobj_freeze` + */ +#define fobj_method_init(method) fobj__method_init(method) + +/* Declare klass handle */ +#define fobj_klass(klass) fobj__klass_declare(klass) +/* + * Implement klass handle. + * Here all the binding are done, therefore it should be called + * after method implementions or at least prototypes. + * Additional declarations could be passed here. + */ +#define fobj_klass_handle(klass, ...) 
fobj__klass_handle(klass, __VA_ARGS__) +/* + * Calling fobj_klass_init is not required, + * unless you want search klass by string name or use `fobj_freeze` + */ +#define fobj_klass_init(klass) fobj__klass_init(klass) +#define fobj_add_methods(klass, ...) fobj__add_methods(klass, __VA_ARGS__) + +#define fobj_iface(iface) fobj__iface_declare(iface) + +/* + * Allocate klass instance, and optionally copy fields. + * + * $alloc(klass) + * fobj_alloc(klass) + * allocates instance + * $alloc(klass, .field1 = val1, .field2 = val2) - + * fobj_alloc(klass, .field1 = val1, .field2 = val2) - + * allocates instance + * copies `(klass){.field1 = val1, .field2 = val2}` + */ +#define fobj_alloc(klass, ...) \ + fobj__alloc(klass, __VA_ARGS__) +#define $alloc(klass, ...) \ + fobj__alloc(klass, __VA_ARGS__) + +/* + * Allocate variable sized instance with additional size. + * Size should be set in bytes, not variable sized field elements count. + * Don't pass variable sized fields as arguments, they will not be copied. + * Fill variable sized fields later. + * + * fobj_alloc_sized(Klass, size) + * allocates instance with custom additional `size` + * returns obj + * fobj_alloc_sized(Klass, size, .field1 = val1, .field2 = val2) + * allocates instance with custom additional `size` + * copies `(klass){.field1 = val1, .field2 = val2}` + * returns obj + */ +#define fobj_alloc_sized(klass, size, ...) \ + fobj__alloc_sized(klass, size, __VA_ARGS__) + +/* + * Object lifetime. + * + * $ref(obj) + * Add reference to object. + * Manually increments reference count. It will prevent object's destruction. + * $unref(obj) + * Forget reference to object, but keep object alive (for some time). + * Will put object to AutoRelease Pool, so it will be destroyed later. + * $del(&var) + * Drop reference to object and (probably) destroy it. + * Manually decrement reference count and clear variable. + * It will destroy object, if its reference count become zero. 
+ * $set(&var, obj) + * Replace value, pointed by first argument, with new value. + * New value will be passed to `$ref` and assigned to ptr. + * Then old value will be passed to `$del`, so it could be destroyed at this + * moment. + * $swap(&var, obj) + * Replace value, pointed by first argument, with new value, and return old + * value. + * New value will be passed to `$ref` and assigned to ptr. + * Then old value will be passed to `$unref`, so preserved in ARP. + * + * Same routines for interfaces: + * + * $iref(obj) + * $iunref(obj) + * $idel(&var) + * $iset(&var, iface) + * $iswap(&var, iface) + * + * AutoRelease Pool. + * + * AutoRelease Pool holds references to objects "about to be destroyed". + * + * AutoRelease Pool is drained on scope exit using GCC's __attribute__((cleanup)), + * and all objects stored in ARP are passed to $del. + * + * Newly created objects are always registered in current Autorelelease Pool, + * and if no $ref(obj), $set(var, obj) or $swap is called with them, they will be + * automatially destroyed on scope exit. + * + * As well, if you replace/delete object in some container and return old value, it + * should be put into AutoRelease Pool to be preserved for value acceptor. + * + * FOBJ_FUNC_ARP() + * Declare autorelease pool for function scope. + * FOBJ_LOOP_ARP() + * Declare autorelease pool for loop body. + * FOBJ_BLOCK_ARP() + * Declare autorelease pool for block body. + * + * $save(obj), $isave(iface) + * Increment reference and store object in parent autorelease pool. + * It is used to preserve object on loop or block exit with autorelease pool + * declared (`FOBJ_LOOP_ARP()` or `FOBJ_BLOCK_ARP()`). + * $result(obj), $iresult(iface) + * Increment reference and store object in autorelease pool parent to + * function's one. + * It is used to preserve object on exit from function with autorelease pool + * declared (`FOBJ_FUNC_ARP()`). 
+ * $return(obj), $ireturn(obj) + * is just `return $result(obj)` + */ +#define $ref(obj) fobj_ref(obj) +#define $unref(obj) fobj_unref(obj) +#define $del(var) $set(var, NULL) +#define $set(var, obj) fobj__set_impl((var), (obj)) +#define $swap(var, obj) fobj__swap_impl((var), (obj)) + +extern fobj_t fobj_ref(fobj_t obj); +extern fobj_t fobj_unref(fobj_t obj); +#define fobj_del(var) fobj_set(var, NULL) +extern void fobj_set(fobj_t* var, fobj_t newval); +extern fobj_t fobj_swap(fobj_t* var, fobj_t newval); + +#define $iref(iface) fobj__iref(iface) +#define $iunref(iface) fobj__iunref(iface) +#define $idel(iface) fobj__idel(iface) +#define $iset(ptr, iface) fobj__iset((ptr), (iface)) +#define $iswap(ptr, iface) fobj__iswap((ptr), (iface)) + +#define FOBJ_FUNC_ARP() FOBJ_ARP_POOL(fobj__func_ar_pool) +#define FOBJ_LOOP_ARP() FOBJ_ARP_POOL(fobj__block_ar_pool) +#define FOBJ_BLOCK_ARP() FOBJ_ARP_POOL(fobj__block_ar_pool) + +#define $save(obj) fobj_store_to_parent_pool($ref(obj), &fobj__block_ar_pool) +#define $result(obj) fobj_store_to_parent_pool($ref(obj), &fobj__func_ar_pool) +#define $return(obj) return $result(obj) + +#define $isave(iface) fobj__isave(iface) +#define $iresult(iface) fobj__iresult(iface) +#define $ireturn(iface) fobj__ireturn(iface) + +/* + * fobjDispose should finish all object's activity and release resources. + * It is called automatically before destroying object, but could be + * called manually as well using `fobj_dispose` function. `fobjDispose` could + * not be called directly. + * Therefore after fobjDispose object should be accessible, ie call of any + * method should not be undefined. But it should not be usable, ie should + * not do any meaningful job. 
+ */ +#define mth__fobjDispose void +fobj__special_void_method(fobjDispose); +#define $dispose(obj) fobj_dispose(obj) +extern void fobj_dispose(fobj_t); + +/* check if object is disposing or was disposed */ +extern bool fobj_disposing(fobj_t); +extern bool fobj_disposed(fobj_t); + +/* + * returns globally allocated klass name. + * DO NOT modify it. + */ +extern const char *fobj_klass_name(fobj_klass_handle_t klass); + +/* + * Return real klass of object. + * + * Note: `fobjKlass` is a method, so it could return something dirrefent from + * real klass. But if you have to cast pointer, you'd probably need real klass. + * + * But in other cases you'd better not abuse this function. + */ +extern fobj_klass_handle_t fobj_real_klass_of(fobj_t); + +/* + * Call method with named/optional args. + * + * $(someMethod, object) + * $(someMethod, object, v1, v2) + * $(someMethod, object, .arg1=v1, .arg2=v2) + * $(someMethod, object, .arg2=v2, .arg1=v1) + * // Skip optional .arg3 + * $(someMethod, object, v1, v2, .arg4=v4) + * $(someMethod, object, .arg1=v1, .arg2=v2, .arg4=v4) + * // Order isn't important with named args. + * $(someMethod, object, .arg4=v4, .arg1=v1, .arg2=v2) + * $(someMethod, object, .arg4=v4, .arg2=v2, .arg1=v1) + * + * fobj_call(someMethod, object) + * fobj_call(someMethod, object, v1, v2) + * fobj_call(someMethod, object, .arg1=v1, .arg2=v2) + * fobj_call(someMethod, object, v1, v2, .arg4=v4) + * fobj_call(someMethod, object, .arg1=v1, .arg2=v2, .arg4=v4) + */ +#define $(meth, self, ...) \ + fobj_call(meth, self, __VA_ARGS__) + +/* + * Call parent klass method implementation with named/optional args. 
+ * + * $super(someMethod, object) + * $super(someMethod, object, v1, v2, .arg4=v4) + * $super(someMethod, object, .arg1=v1, .arg2=v2, .arg4=v4) + * fobj_call_super(someMethod, object) + * fobj_call_super(someMethod, object, v1, v2) + * fobj_call_super(someMethod, object, v1, v2, .arg4=v4) + * fobj_call_super(someMethod, object, .arg1=v1, .arg2=v2, .arg4=v4) + * + * It uses variable set inside of Self(klass) statement. + */ +#define $super(meth, self, ...) \ + fobj_call_super(meth, fobj__klassh, self, __VA_ARGS__) + +/* + * Call method stored in the interface struct. + * Interface is passed by value, not pointer. + * + * SomeIface_i someIface = bind_SomeIface(obj); + * $i(someMethod, someIface) + * $i(someMethod, someIface, v1, v2, .arg4=v4) + * $i(someMethod, someIface, .arg1=v1, .arg2=v2, .arg4=v4) + * fobj_iface_call(someMethod, someIface) + * fobj_iface_call(someMethod, someIface, v1, v2) + * fobj_iface_call(someMethod, someIface, v1, v2, .arg4=v4) + * fobj_iface_call(someMethod, someIface, .arg1=v1, .arg2=v2, .arg4=v4) + */ +#define $i(meth, iface, ...) \ + fobj_iface_call(meth, iface, __VA_ARGS__) + +/* + * Determine if object implements interface. + * + * if ($implements(someIface, object, &iface_var)) { + * $i(someMethod, iface_var); + * } + * + * if ($implements(someIface, object)) { + * workWith(object); + * } + * + * if (fobj_implements(iface, object, &iface_var)) { + * fobj_iface_call(someMethod, iface_var); + * } + * + * if (fobj_implements(iface, object)) { + * workWith(object); + * } + * + * And without macroses: + * + * if (implements_someIface(object, &iface_var)) { + * $i(someMethod, iface_var); + * } + * + * if (implements_someIface(object, NULL)) { + * workWith(object); + * } + */ +#define $implements(iface, obj, ...) \ + fobj__implements(iface, obj, __VA_ARGS__) +#define fobj_implements(iface, obj, ...) \ + fobj__implements(iface, obj, __VA_ARGS__) + +/* + * Determine if optional method is filled in interface. 
+ * Note: required methods are certainly filled. + * + * if ($ifilled(someMethod, iface)) { + * $i(someMethod, iface); + * } + * + * if (fobj_iface_filled(someMethod, iface)) { + * fobj_iface_call(someMethod, iface); + * } + */ +#define $ifilled(meth, iface) \ + fobj_iface_filled(meth, iface) + +/* + * Call method if it is defined, and assign result. + * + * value_t val; + * if ($ifdef(val =, someMethod, self, v1, v2, .arg4=v4)) { + * ... + * } + * + * or doesn't assign result + * + * if ($ifdef(, someMethod, self, v1, v2, .arg4=v4)) { + * ... + * } + */ +#define $ifdef(assignment, meth, self, ...) \ + fobj_ifdef(assignment, meth, (self), __VA_ARGS__) + +#define $iifdef(assignment, meth, iface, ...) \ + fobj_iface_ifdef(assignment, meth, iface, __VA_ARGS__) + +#define $bind(iface_type, obj) fobj_bind(iface_type, (obj)) +#define $reduce(newiface, iface) fobj_reduce(newiface, (iface)) + +#define $isNULL(iface) ((iface).self == NULL) +#define $notNULL(iface) ((iface).self != NULL) +#define $setNULL(ifacep) ((ifacep)->self = NULL) +#define $null(iface_type) ((iface_type##_i){NULL}) +/* + * Base type + */ +#define mth__fobjRepr struct fobjStr* +fobj_method(fobjRepr); +#define mth__fobjKlass fobj_klass_handle_t +fobj_method(fobjKlass); + +#define $repr(obj) $(fobjRepr, (obj)) +#define $irepr(iface) $(fobjRepr, (iface).self) + +typedef struct fobjBase { + char fobj__base[0]; +} fobjBase; +#define kls__fobjBase mth(fobjKlass, fobjRepr) +fobj_klass(fobjBase); + +/* + * fobjFormat should be defined for pretty printing + */ +#define mth__fobjFormat void, (ft_strbuf_t*, out), (const char*, fmt) +#define mth__fobjFormat__optional() (fmt, NULL) +fobj_method(fobjFormat); + +/********************************* + * String + */ + +typedef struct fobjStr { + const char *ptr; + uint32_t len; + char _buf[]; /* private buffer for copied string */ +} fobjStr; + +ft_inline fobjStr* fobj_str(const char* s); +#define $S(s) fobj_str(s) +extern fobjStr* fobj_newstr(ft_str_t str, bool gifted); 
+ft_inline ft_str_t fobj_getstr(fobjStr *str); + +/* + * Steal if buffer is allocated, or copy otherwise. + * Buffer is zeroed and should be re-initialized. + */ +ft_inline fobjStr* fobj_strbuf_steal(ft_strbuf_t *buf); + +ft_gnu_printf(1, 2) +extern fobjStr* fobj_sprintf(const char* fmt, ...); +extern fobjStr* fobj_strcat(fobjStr *ostr, ft_str_t str); +ft_inline fobjStr* fobj_strcatc(fobjStr *ostr, const char *str); +ft_inline fobjStr* fobj_strcatc2(fobjStr *ostr, const char *str1, const char *str2); +ft_inline fobjStr* fobj_stradd(fobjStr *ostr, fobjStr *other); +ft_gnu_printf(2, 3) +extern fobjStr* fobj_strcatf(fobjStr *str, const char* fmt, ...); + +/* String comparison */ +ft_inline bool fobj_streq(fobjStr* self, fobjStr *oth); +ft_inline FT_CMP_RES fobj_strcmp(fobjStr* self, fobjStr *oth); +ft_inline bool fobj_streq_str(fobjStr* self, ft_str_t oth); +ft_inline FT_CMP_RES fobj_strcmp_str(fobjStr* self, ft_str_t oth); +ft_inline bool fobj_streq_c(fobjStr* self, const char *oth); +ft_inline FT_CMP_RES fobj_strcmp_c(fobjStr* self, const char *oth); + +/* turn object to string using fobjFormat */ +extern fobjStr* fobj_tostr(fobj_t obj, const char* fmt); +#define $tostr(obj, ...) fobj_tostr((obj), fm_or_default(__VA_ARGS__)(NULL)) +#define $itostr(obj, ...) 
fobj_tostr((obj).self, fm_or_default(__VA_ARGS__)(NULL)) + +#define kls__fobjStr mth(fobjRepr, fobjFormat) +fobj_klass(fobjStr); + +/********************************** + * Int + */ + +typedef struct fobjInt { + int64_t i; +} fobjInt; + +ft_inline fobjInt* fobj_int(int64_t i); +#define $I(i) fobj_int(i) + +#define kls__fobjInt mth(fobjRepr, fobjFormat) +fobj_klass(fobjInt); + +/********************************** + * UInt + */ + +typedef struct fobjUInt { + uint64_t u; +} fobjUInt; + +ft_inline fobjUInt* fobj_uint(uint64_t u); +#define $U(i) fobj_uint(i) + +#define kls__fobjUInt mth(fobjRepr, fobjFormat) +fobj_klass(fobjUInt); + +/********************************** + * Float + */ + +typedef struct fobjFloat { + double f; +} fobjFloat; + +ft_inline fobjFloat* fobj_float(double f); +#define $F(f) fobj_float(f) + +#define kls__fobjFloat mth(fobjRepr, fobjFormat) +fobj_klass(fobjFloat); + +/********************************** + * Bool + */ + +typedef struct fobjBool { + bool b; +} fobjBool; + +ft_inline fobjBool* fobj_bool(bool f); +#define $B(f) fobj_bool(f) + +#define kls__fobjBool mth(fobjRepr, fobjFormat) +fobj_klass(fobjBool); + +/********************************** + * kv + */ +typedef struct fobj_kv { + const char * key; + fobj_t value; +} fobj_kv; + +#define FT_SLICE fokv +#define FT_SLICE_TYPE fobj_kv +#include +#define FT_SEARCH fokv +#define FT_SEARCH_TYPE fobj_kv +#define FT_SEARCH_PATTERN const char* +#include +ft_inline FT_CMP_RES fobj_fokv_cmpc(fobj_kv kv, const char* nm) { + return strcmp(kv.key, nm); +} + +extern fobjStr* fobj_printkv(const char *fmt, ft_slc_fokv_t kv); +#define $fmt(fmt, ...) 
fobj__printkv(fmt, __VA_ARGS__) + + +/********************************** + * ERRORS + */ + +#define iface__err +fobj_iface(err); + +#define fobj_error_kind(err) fobj__error_kind(err) +#define fobj_error_flag_key(key) fobj__error_flag_key(key) +#define fobj_error_int_key(key) fobj__error_int_key(key) +#define fobj_error_uint_key(key) fobj__error_uint_key(key) +#define fobj_error_cstr_key(key) fobj__error_cstr_key(key) +#define fobj_error_float_key(key) fobj__error_float_key(key) +#define fobj_error_bool_key(key) fobj__error_bool_key(key) +#define fobj_error_object_key(key) fobj__error_object_key(key) + +extern ft_arg_t fobj_err_getkv(err_i err, const char *key, ft_arg_t dflt, bool *found); + +fobj_error_kind(RT); +fobj_error_kind(SysErr); + +fobj_error_object_key(cause); +fobj_error_int_key(errNo); +fobj_error_cstr_key(errStr); +#define fobj_errno_keys(errno) (errNo, errno), (errStr, ft_strerror(errno)) +fobj_error_cstr_key(path); +fobj_error_cstr_key(old_path); +fobj_error_cstr_key(new_path); + +/* special key for raw appending to error message */ +fobj_error_cstr_key(__msgSuffix); + +/* + * $err(Type) + * $err(Type, "some error") + * $err(Type, "Some bad thing happens at {path}", (path, filename)) + */ +#define $err(type, ...) fobj_make_err(type, __VA_ARGS__) +#define $noerr(...) fm_if(fm_va_01(__VA_ARGS__), $isNULL(__VA_ARGS__), $null(err)) +#define $haserr(err) $notNULL(err) + +/* + * $syserr() + * $syserr("allocation error") + * $syserr("Could not open file {path}", (path, filename)) + */ +#define $syserr(...) fobj_make_syserr(__VA_ARGS__) + +/* fetch key back */ +#define $errkey(key, err, ...) 
fobj__err_getkey(key, err, __VA_ARGS__) +ft_inline int getErrno(err_i err); +ft_inline const char* getErrnoStr(err_i err); + +/* + * Get error type + */ +#define $errtype(err) fobj_errtype(err) + +/* + * Get error message + */ +#define $errmsg(err) fobj_errmsg(err) +/* + * Get error location + */ +#define $errsrc(err) fobj_errsrc(err) + +#define kls__fobjErr mth(fobjDispose, fobjRepr, fobjFormat) +fobj_klass(fobjErr); + +/* + * Combines two error by placing second into single linked list of siblings. + * If either of error is NULL, other error is returned. + * If both errors are NULL, then NULL is returned. + * If second already has siblings, first's list of siblings is appended to + * second's list, then second becames first sibling of first. + */ +extern err_i fobj_err_combine(err_i first, err_i second); + +#define fobj_reset_err(err) do { ft_dbg_assert(err); *err = (err_i){NULL}; } while(0) + +#include "./impl/fo_impl2.h" + +#endif diff --git a/src/fu_util/ft_ar_examples.h b/src/fu_util/ft_ar_examples.h new file mode 100644 index 000000000..12cbd3b8e --- /dev/null +++ b/src/fu_util/ft_ar_examples.h @@ -0,0 +1,71 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#ifndef FT_AR_EXAMPLES_H +#define FT_AR_EXAMPLES_H + +/* + * Slices and arrays for int. 
+ * Defines slice: + * + * typedef struct { int *ptr; size_t len; } ft_slc_int_t; + * + * ft_slc_int_t ft_slc_int_make(int *ptr, size_t len); + * ft_slc_int_t ft_slc_int_alloc(int *ptr, size_t len); + * + * int ft_slc_int_at(ft_slc_int_t *ptr, ssize_t at); + * int ft_slc_int_set(ft_slc_int_t *ptr, ssize_t at, int v); + * + * ft_slc_int_t ft_slc_int_slice(ft_slc_int_t *ptr, ssize_t start, ssize_t end) + * + * void ft_slc_int_each(ft_slc_int_t *, void (*each)(int)); + * void ft_slc_int_each_r(ft_slc_int_t *, void (*each)(int, ft_arg_t), ft_arg_t); + * + * Defines array: + * + * typedef struct { int *ptr; size_t len; size_t cap; } ft_arr_int_t; + * ft_arr_int_t ft_arr_int_alloc(int *ptr, size_t len); + * + * int ft_arr_int_at (ft_arr_int_t *ptr, ssize_t at); + * int ft_arr_int_set(ft_arr_int_t *ptr, ssize_t at, int v); + * + * ft_slc_int_t ft_arr_int_slice(ft_arr_int_t *ptr, ssize_t start, ssize_t end) + * + * void ft_arr_int_each (ft_arr_int_t *, void (*each)(int)); + * void ft_arr_int_each_r(ft_arr_int_t *, void (*each)(int, ft_arg_t), ft_arg_t); + * + * void ft_arr_int_ensure(ft_arr_int_t *, size_t addcapa); + * void ft_arr_int_recapa(ft_arr_int_t *, size_t newcapa); + * void ft_arr_int_resize(ft_arr_int_t *, size_t newsize); + * + * void ft_arr_int_insert_at(ft_arr_int_t *, ssize_t at, int el); + * void ft_arr_int_insert_n (ft_arr_int_t *, ssize_t at, int *el, size_t n); + * void ft_arr_int_push (ft_arr_int_t *, int el); + * void ft_arr_int_append (ft_arr_int_t *, int *el, size_t n); + * + * int ft_arr_int_del_at (ft_arr_int_t *, ssize_t at); + * int ft_arr_int_pop (ft_arr_int_t *); + * void ft_arr_int_del_slice(ft_arr_int_t *, ssize_t start, ssize_t end); + * + * void ft_array_walk (ft_arr_int_t *, + * FT_WALK_ACT (*walk)(intl)) + * void ft_array_walk_r(ft_arr_int_t *, + * FT_WALK_ACT (*walk)(int, ft_arg_t), ft_arg_t) + */ +#define FT_SLICE int +#define FT_SLICE_TYPE int +#include + +/* + * Slices and arrays for C strings + */ +#define FT_SLICE cstr +#define 
FT_SLICE_TYPE const char* +#include + +/* + * Slices and arrays for C strings + */ +#define FT_SLICE void +#define FT_SLICE_TYPE void* +#include + +#endif diff --git a/src/fu_util/ft_array.inc.h b/src/fu_util/ft_array.inc.h new file mode 100644 index 000000000..115aa210e --- /dev/null +++ b/src/fu_util/ft_array.inc.h @@ -0,0 +1,590 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#include + +/* + * Accepts 2 macroses: + * - FT_SLICE - name suffix + * - FT_SLICE_TYPE - element type. + * + * Produces: + * typedef struct { + * FT_SLICE_TYPE* ptr; + * size_t len; + * } ft_slc_FT_SLICE_t; + * + * typedef struct { + * FT_SLICE_TYPE* ptr; + * size_t len; + * size_t cap; + * } ft_arr_FT_SLICE_t; + * + * - create slice struct + * + * ft_slc_FT_SLICE_t + * ft_slc_FT_SLICE_make(FT_SLICE_TYPE_t *ptr, size_t len); + * + * - allocate memory and copy data + * + * ft_slc_FT_SLICE_t + * ft_slc_FT_SLICE_alloc(FT_SLICE_TYPE_t *ptr, size_t len); + * + * ft_arr_FT_SLICE_t + * ft_arr_FT_SLICE_alloc(FT_SLICE_TYPE_t *ptr, size_t len); + * + * - take an element. + * if `at < 0`, then takes at `len - at` + * + * FT_SLICE_TYPE + * ft_slc_FT_SLICE_at(ft_slc_FT_SLICE_t *sl, ssize_t at); + * + * FT_SLICE_TYPE + * ft_arr_FT_SLICE_at(ft_arr_FT_SLICE_t *sl, ssize_t at); + * + * - set an element. + * if `at < 0`, then sets at `len - at` + * + * FT_SLICE_TYPE + * ft_slc_FT_SLICE_set(ft_slc_FT_SLICE_t *sl, ssize_t at, FT_SLICE_TYPE el); + * + * FT_SLICE_TYPE + * ft_arr_FT_SLICE_set(ft_arr_FT_SLICE_t *sl, ssize_t at, FT_SLICE_TYPE el); + * + * - take subslice + * `start` and `end` are normalized as index in `*_at`, `*_set` functions. + * Additionally, FT_SLICE_END could be used as a slice end. 
+ * + * ft_slc_FT_SLICE_t + * ft_slc_FT_SLICE_slice(ft_slc_FT_SLICE_t *sl, ssize_t start, ssize end); + * + * ft_slc_FT_SLICE_t + * ft_arr_FT_SLICE_slice(ft_arr_FT_SLICE_t *sl, ssize_t start, ssize end); + * + * - call function for each element by value + * + * void + * ft_slc_FT_SLICE_each(ft_slc_FT_SLICE_t *, void (*)(FT_SLICE_TYPE)); + * + * void + * ft_slc_FT_SLICE_each_r(ft_slc_FT_SLICE_t *, + * void (*)(FT_SLICE_TYPE, ft_arg_t), ft_arg_t); + * + * void + * ft_arr_FT_SLICE_each(ft_arr_FT_SLICE_t *, void (*)(FT_SLICE_TYPE)); + * + * void + * ft_arr_FT_SLICE_each_r(ft_arr_FT_SLICE_t *, + * void (*)(FT_SLICE_TYPE, ft_arg_t), ft_arg_t); + * + * Following are only for ARRAY: + * + * - ensure array's capacity for additional elements + * + * void + * ft_arr_FT_SLICE_ensure(ft_arr_FT_SLICE_t *arr, size_t addelems); + * + * - ensure array's total capacity (or decrease it) + * It rounds up capacity to power of 2. + * It will panic if new capacity is less than lenght. + * + * void + * ft_arr_FT_SLICE_recapa(ft_arr_FT_SLICE_t *arr, size_t newcapa); + * + * - truncate or zero extend array. + * + * void + * ft_arr_FT_SLICE_resize(ft_arr_FT_SLICE_t *arr, size_t newsize); + * + * - push one or many elements to end of array + * + * void + * ft_arr_FT_SLICE_push(ft_arr_FT_SLICE_t *arr, FT_SLICE_TYPE el); + * + * void + * ft_arr_FT_SLICE_append(ft_arr_FT_SLICE_t *arr, FT_SLICE_TYPE *el, size_t n); + * + * - insert one or many elements into middle of array + * + * void + * ft_arr_FT_SLICE_insert_at(ft_arr_FT_SLICE_t *arr, size_t at, FT_SLICE_TYPE el); + * + * void + * ft_arr_FT_SLICE_insert_n(ft_arr_FT_SLICE_t *arr, size_t at, FT_SLICE_TYPE *el, size_t n); + * + * - delete one or many elements + * + * FT_SLICE_TYPE + * ft_arr_FT_SLICE_pop(ft_arr_FT_SLICE_t *arr); + * + * FT_SLICE_TYPE + * ft_arr_FT_SLICE_del_at(ft_arr_FT_SLICE_t *arr, size_t at); + * + * void + * ft_arr_FT_SLICE_del_slice(ft_arr_FT_SLICE_T *arr, ssize_t start, ssize_t end); + * + * - controllable array iteration. 
+ * Callback may tell what to do: + * FT_WALK_CONT - continue + * FT_WALK_BREAK - break + * FT_WALK_DEL - delete current element and continue + * FT_WALK_DEL_BREAK - delete current element and break + * + * void + * ft_arr_FT_SLICE_walk(ft_arr_FT_SLICE_T *arr, + * FT_WALK_ACT (*walk)(FT_SLICE_TYPE *el)) + * + * void + * ft_arr_FT_SLICE_walk_r(ft_arr_FT_SLICE_T *arr, + * FT_WALK_ACT (*walk)(FT_SLICE_TYPE *el, ft_arg_t arg), + * ft_arg_t arg) + */ + +#define ft_slice_pref fm_cat(ft_slc_, FT_SLICE) +#define ft_array_pref fm_cat(ft_arr_, FT_SLICE) +#define ft_slice_type fm_cat(ft_slice_pref,_t) +#define ft_array_type fm_cat(ft_array_pref,_t) + +#define ft_slice_make fm_cat(ft_slice_pref, _make) +#define ft_slice_alloc fm_cat(ft_slice_pref, _alloc) +#define ft_slice_at fm_cat(ft_slice_pref, _at) +#define ft_slice_set fm_cat(ft_slice_pref, _set) +#define ft_slice_slice fm_cat(ft_slice_pref, _slice) +#define ft_slice_each fm_cat(ft_slice_pref, _each) +#define ft_slice_each_r fm_cat(ft_slice_pref, _each_r) + +#define ft_array_alloc fm_cat(ft_array_pref, _alloc) +#define ft_array_at fm_cat(ft_array_pref, _at) +#define ft_array_set fm_cat(ft_array_pref, _set) +#define ft_array_slice fm_cat(ft_array_pref, _slice) +#define ft_array_each fm_cat(ft_array_pref, _each) +#define ft_array_each_r fm_cat(ft_array_pref, _each_r) + +#define ft_array_ensure fm_cat(ft_array_pref, _ensure) +#define ft_array_recapa fm_cat(ft_array_pref, _recapa) +#define ft_array_resize fm_cat(ft_array_pref, _resize) +#define ft_array_free fm_cat(ft_array_pref, _free) + +#define ft_array_insert_at fm_cat(ft_array_pref, _insert_at) +#define ft_array_insert_n fm_cat(ft_array_pref, _insert_n) +#define ft_array_push fm_cat(ft_array_pref, _push) +#define ft_array_push2 fm_cat(ft_array_pref, _push2) +#define ft_array_append fm_cat(ft_array_pref, _append) + +#define ft_array_del_at fm_cat(ft_array_pref, _del_at) +#define ft_array_del_slice fm_cat(ft_array_pref, _del_slice) +#define ft_array_pop 
fm_cat(ft_array_pref, _pop) + +#define ft_array_walk fm_cat(ft_array_pref, _walk) +#define ft_array_walk_r fm_cat(ft_array_pref, _walk_r) + +#define HUGE_SIZE ((uint64_t)UINT_MAX << 16) +#ifndef NDEBUG +/* try to catch uninitialized vars */ +#define ft_slice_invariants(slc) \ + ft_dbg_assert(ft_mul_size(sizeof(FT_SLICE_TYPE), slc->len) < HUGE_SIZE); \ + ft_dbg_assert((slc->len == 0) || (slc->ptr != NULL)) +#define ft_array_invariants(arr) \ + ft_dbg_assert(ft_mul_size(sizeof(FT_SLICE_TYPE), arr->len) < HUGE_SIZE); \ + ft_dbg_assert(ft_mul_size(sizeof(FT_SLICE_TYPE), arr->len) < HUGE_SIZE); \ + ft_dbg_assert(arr->cap >= arr->len); \ + ft_dbg_assert((arr->cap == 0) || (arr->ptr != NULL)) +#else +#define ft_slice_invariants(slc) \ + ft_dbg_assert((slc->len == 0) || (slc->ptr != NULL)) +#define ft_array_invariants(arr) \ + ft_dbg_assert(arr->cap >= arr->len); \ + ft_dbg_assert((arr->cap == 0) || (arr->ptr != NULL)) +#endif + +typedef struct ft_slice_type { + FT_SLICE_TYPE *ptr; + size_t len; +} ft_slice_type; + +typedef struct ft_array_type { + FT_SLICE_TYPE *ptr; + size_t len; + size_t cap; +} ft_array_type; + +ft_inline ft_slice_type +ft_slice_make(FT_SLICE_TYPE *ptr, size_t len) { + return (ft_slice_type){.ptr = ptr, .len = len}; +} + +ft_inline ft_slice_type +ft_slice_alloc(FT_SLICE_TYPE *ptr, size_t len) { + FT_SLICE_TYPE *newptr = ft_malloc_arr(sizeof(FT_SLICE_TYPE), len); + memcpy(newptr, ptr, sizeof(FT_SLICE_TYPE) * len); + return (ft_slice_type){.ptr = newptr, .len = len}; +} + +ft_inline FT_SLICE_TYPE +ft_slice_at(const ft_slice_type *sl, ssize_t at) { + ft_slice_invariants(sl); + at = ft__index_unify(at, sl->len); + return sl->ptr[at]; +} + +ft_inline FT_SLICE_TYPE +ft_slice_set(const ft_slice_type *sl, ssize_t at, FT_SLICE_TYPE val) { + ft_slice_invariants(sl); + at = ft__index_unify(at, sl->len); + sl->ptr[at] = val; + return val; +} + +ft_inline ft_slice_type +ft_slice_slice(const ft_slice_type *sl, ssize_t start, ssize_t end) { + ft_slice_invariants(sl); 
+ start = ft__slcindex_unify(start, sl->len); + end = ft__slcindex_unify(end, sl->len); + ft_assert(start <= end); + return (ft_slice_type){.ptr = sl->ptr + start, .len = end - start}; +} + +ft_inline void +ft_slice_each_r(const ft_slice_type *sl, + void (*each)(FT_SLICE_TYPE el, ft_arg_t arg), + ft_arg_t arg) { + size_t i; + ft_slice_invariants(sl); + for (i = 0; i < sl->len; i++) { + each(sl->ptr[i], arg); + } +} + +ft_inline void +ft_slice_each(const ft_slice_type *sl, void (*each)(FT_SLICE_TYPE el)) { + size_t i; + ft_slice_invariants(sl); + for (i = 0; i < sl->len; i++) { + each(sl->ptr[i]); + } +} + +/* ARRAY */ + +ft_inline FT_SLICE_TYPE +ft_array_at(const ft_array_type *arr, ssize_t at) { + ft_array_invariants(arr); + at = ft__index_unify(at, arr->len); + return arr->ptr[at]; +} + +ft_inline FT_SLICE_TYPE +ft_array_set(const ft_array_type *arr, ssize_t at, FT_SLICE_TYPE val) { + ft_array_invariants(arr); + at = ft__index_unify(at, arr->len); + arr->ptr[at] = val; + return val; +} + +ft_inline ft_slice_type +ft_array_slice(const ft_array_type *arr, ssize_t start, ssize_t end) { + ft_array_invariants(arr); + start = ft__slcindex_unify(start, arr->len); + end = ft__slcindex_unify(end, arr->len); + ft_assert(start <= end); + return (ft_slice_type){.ptr = arr->ptr + start, .len = end - start}; +} + +ft_inline void +ft_array_each_r(const ft_array_type *arr, + void (*each)(FT_SLICE_TYPE el, ft_arg_t arg), + ft_arg_t arg) { + size_t i; + ft_array_invariants(arr); + for (i = 0; i < arr->len; i++) { + each(arr->ptr[i], arg); + } +} + +ft_inline void +ft_array_each(const ft_array_type *arr, void (*each)(FT_SLICE_TYPE el)) { + size_t i; + ft_array_invariants(arr); + for (i = 0; i < arr->len; i++) { + each(arr->ptr[i]); + } +} + +ft_inline void +ft_array_ensure(ft_array_type *arr, size_t sz) { + size_t newcap; + size_t newlen; + + ft_array_invariants(arr); + ft_assert(SIZE_MAX/2 - 1 - arr->len >= sz); + + newlen = arr->len + sz; + if (arr->cap >= newlen) + return; + + 
newcap = arr->cap ? arr->cap : 4; + while (newcap < newlen) + newcap *= 2; + + arr->ptr = ft_realloc_arr(arr->ptr, sizeof(FT_SLICE_TYPE), arr->cap, newcap); + arr->cap = newcap; +} + +ft_inline void +ft_array_recapa(ft_array_type *arr, size_t cap) { + size_t newcap; + + ft_array_invariants(arr); + ft_assert(cap >= arr->len); + ft_dbg_assert(SIZE_MAX/2 - 1 >= cap); + + newcap = (arr->cap && arr->cap <= cap) ? arr->cap : 4; + while (newcap < cap) + newcap *= 2; + + if (newcap == cap) + return; + + arr->ptr = ft_realloc_arr(arr->ptr, sizeof(FT_SLICE_TYPE), arr->cap, newcap); + arr->cap = newcap; +} + +ft_inline void +ft_array_resize(ft_array_type *arr, size_t len) { + ft_array_invariants(arr); + + if (len > arr->cap) + ft_array_recapa(arr, len); + + if (len < arr->len) { + memset(&arr->ptr[len], 0, sizeof(FT_SLICE_TYPE) * (arr->len - len)); + } else if (len > arr->len) { + memset(&arr->ptr[arr->len], 0, sizeof(FT_SLICE_TYPE) * (len - arr->len)); + } + + arr->len = len; + + if (arr->len < arr->cap / 4) + ft_array_recapa(arr, arr->len); +} + +ft_inline ft_array_type +ft_array_alloc(FT_SLICE_TYPE *ptr, size_t len) { + ft_array_type arr = {NULL, 0, 0}; + + if (len > 0) { + ft_array_ensure(&arr, len); + memcpy(arr.ptr, ptr, sizeof(FT_SLICE_TYPE) * len); + arr.len = len; + } + return arr; +} + +ft_inline void +ft_array_free(ft_array_type *arr) { + ft_array_invariants(arr); + ft_free(arr->ptr); + arr->ptr = 0; + arr->len = 0; + arr->cap = 0; +} + +ft_inline FT_SLICE_TYPE +ft_array_del_at(ft_array_type *arr, ssize_t at) { + FT_SLICE_TYPE el; + ft_array_invariants(arr); + + at = ft__index_unify(at, arr->len); + el = arr->ptr[at]; + if (at+1 < arr->len) { + memmove(&arr->ptr[at], &arr->ptr[at+1], sizeof(FT_SLICE_TYPE)*(arr->len-at-1)); + } + memset(&arr->ptr[arr->len-1], 0, sizeof(FT_SLICE_TYPE)); + arr->len--; + + if (arr->len < arr->cap / 4) + ft_array_recapa(arr, arr->len); + + return el; +} + +ft_inline void +ft_array_del_slice(ft_array_type *arr, ssize_t start, ssize_t 
end) { + ft_array_invariants(arr); + + start = ft__slcindex_unify(start, arr->len); + end = ft__slcindex_unify(end, arr->len); + ft_assert(end >= start); + if (end == start) + return; + + if (end < arr->len) { + memmove(&arr->ptr[start], &arr->ptr[end], sizeof(FT_SLICE_TYPE)*(arr->len-end)); + } + + memset(&arr->ptr[arr->len-(end-start)], 0, sizeof(FT_SLICE_TYPE)*(end-start)); + arr->len -= end-start; + + if (arr->len < arr->cap / 4) + ft_array_recapa(arr, arr->len); +} + +ft_inline FT_SLICE_TYPE +ft_array_pop(ft_array_type *arr) { + FT_SLICE_TYPE el; + ft_array_invariants(arr); + + el = arr->ptr[arr->len-1]; + memset(&arr->ptr[arr->len-1], 0, sizeof(FT_SLICE_TYPE)); + arr->len--; + + if (arr->len < arr->cap / 4) + ft_array_recapa(arr, arr->len); + + return el; +} + +ft_inline void +ft_array_insert_at(ft_array_type *arr, ssize_t at, FT_SLICE_TYPE el) { + ft_array_invariants(arr); + at = ft__slcindex_unify(at, arr->len); + ft_array_ensure(arr, 1); + if (at != arr->len) + memmove(&arr->ptr[at+1], &arr->ptr[at], sizeof(FT_SLICE_TYPE) * (arr->len - at)); + arr->ptr[at] = el; + arr->len++; +} + +ft_inline void +ft_array_push(ft_array_type *arr, FT_SLICE_TYPE el) { + ft_array_invariants(arr); + ft_array_ensure(arr, 1); + arr->ptr[arr->len] = el; + arr->len++; +} + +ft_inline void +ft_array_push2(ft_array_type *arr, FT_SLICE_TYPE el1, FT_SLICE_TYPE el2) { + ft_array_invariants(arr); + ft_array_ensure(arr, 2); + arr->ptr[arr->len+0] = el1; + arr->ptr[arr->len+1] = el2; + arr->len+=2; +} + +ft_inline void +ft_array_insert_n(ft_array_type *arr, ssize_t at, FT_SLICE_TYPE *el, size_t n) { + bool alloced = false; + ft_array_invariants(arr); + + at = ft__slcindex_unify(at, arr->len); + + if (el + n >= arr->ptr && el < arr->ptr + arr->len) { + /* oops, overlap. 
+ * Since we could reallocate array, we have to copy to allocated place */ + FT_SLICE_TYPE *cpy; + cpy = ft_malloc_arr(sizeof(FT_SLICE_TYPE), n); + memcpy(cpy, el, sizeof(FT_SLICE_TYPE) * n); + el = cpy; + alloced = true; + } + + ft_array_ensure(arr, n); + + if (at != arr->len) + memmove(&arr->ptr[at+n], &arr->ptr[at], + sizeof(FT_SLICE_TYPE) * (arr->len - at)); + memmove(&arr->ptr[at], el, sizeof(FT_SLICE_TYPE) * n); + arr->len += n; + + if (alloced) + ft_free(el); +} + +ft_inline void +ft_array_append(ft_array_type *arr, FT_SLICE_TYPE *el, size_t n) { + ft_array_invariants(arr); + ft_array_insert_n(arr, arr->len, el, n); +} + +ft_inline void +ft_array_walk_r(ft_array_type *arr, + FT_WALK_ACT (*walk)(FT_SLICE_TYPE *el, ft_arg_t arg), + ft_arg_t arg) { + size_t i, j = 0; + FT_WALK_ACT act = FT_WALK_CONT; + ft_array_invariants(arr); + + for (i = 0; i < arr->len && (act & FT_WALK_BREAK) == 0; i++) { + act = walk(&arr->ptr[i], arg); + if ((act & FT_WALK_DEL) == 0) { + if (i != j) + arr->ptr[j] = arr->ptr[i]; + j++; + } + } + /* move tail */ + if (i != arr->len) { + if (i != j) { + memmove(&arr->ptr[j], &arr->ptr[i], sizeof(FT_SLICE_TYPE)*(arr->len - i)); + } + j += arr->len - i; + } + + /* set length */ + if (j != arr->len) { + memset(&arr->ptr[j], 0, sizeof(FT_SLICE_TYPE)*(arr->len - j)); + arr->len = j; + if (arr->len < arr->cap / 4) + ft_array_recapa(arr, arr->len); + } +} + +ft_inline void +ft_array_walk(ft_array_type *arr, FT_WALK_ACT (*walk)(FT_SLICE_TYPE *el)) { + ft_array_walk_r(arr, (FT_WALK_ACT (*)(FT_SLICE_TYPE*, ft_arg_t))walk, ft_mka_z()); +} + +#undef FT_SLICE +#undef FT_SLICE_TYPE + +#undef ft_slice_pref +#undef ft_array_pref +#undef ft_slice_type +#undef ft_array_type + +#undef ft_slice_make +#undef ft_slice_alloc +#undef ft_slice_at +#undef ft_slice_set +#undef ft_slice_slice +#undef ft_slice_each +#undef ft_slice_each_r + +#undef ft_array_alloc +#undef ft_array_at +#undef ft_array_set +#undef ft_array_slice +#undef ft_array_each +#undef 
ft_array_each_r + +#undef ft_array_ensure +#undef ft_array_recapa +#undef ft_array_resize +#undef ft_array_free + +#undef ft_array_insert_at +#undef ft_array_insert_n +#undef ft_array_push +#undef ft_array_push2 +#undef ft_array_append + +#undef ft_array_del_at +#undef ft_array_del_slice +#undef ft_array_pop + +#undef ft_array_walk +#undef ft_array_walk_r + +#undef HUGE_SIZE +#undef ft_slice_invariants +#undef ft_array_invariants + diff --git a/src/fu_util/ft_search.inc.h b/src/fu_util/ft_search.inc.h new file mode 100644 index 000000000..432554c78 --- /dev/null +++ b/src/fu_util/ft_search.inc.h @@ -0,0 +1,113 @@ +/* + * Search template. + * Accepts three macros: + * - FT_SEARCH - suffix for functions + * - FT_SEARCH_TYPE - type of array element + * - FT_SEARCH_PATTERN - optional type of key element. Defined to FT_SEARCH_TYPE if not present + * + * Produces: + * + * + * + * - binary search function + * It returns index of first element that is equal or greater than pattern in + * a sorted array. + * + * ft_bsres_t + * ft_bsearch_FT_SEARCH( + * FT_SEARCH_TYPE *arr, size_t len, FT_SEARCH_PATTERN pat, + * FT_CMP_RES (*cmp)(FT_SEARCH_TYPE el, FT_SEARCH_PATTERN pat)); + * ft_bsres_t + * ft_bsearch_FT_SEARCH_r( + * FT_SEARCH_TYPE *arr, size_t len, FT_SEARCH_PATTERN pat, + * FT_CMP_RES (*cmp)(FT_SEARCH_TYPE el, FT_SEARCH_PATTERN pat, ft_arg_t arg), + * ft_arg_t arg); + * + * - linear search function + * It returns index of first element that matches predicate, or len.
+ * + * size_t + * ft_search_FT_SEARCH( + * FT_SEARCH_TYPE *arr, size_t len, FT_SEARCH_PATTERN pat, + * FT_CMP_RES (*eq)(FT_SEARCH_TYPE el, FT_SEARCH_PATTERN pat)) + * or + * size_t + * ft_search_FT_SEARCH( + * FT_SEARCH_TYPE *arr, size_t len, FT_SEARCH_PATTERN pat, + * FT_CMP_RES (*eq)(FT_SEARCH_TYPE el, FT_SEARCH_PATTERN pat, ft_arg_t arg), + * ft_arg_t arg) + * + */ + +#include + +#define ft_func_bsearch fm_cat(ft_bsearch_, FT_SEARCH) +#define ft_func_bsearch_r fm_cat3(ft_bsearch_, FT_SEARCH, _r) +#define ft_func_search fm_cat(ft_search_, FT_SEARCH) +#define ft_func_search_r fm_cat3(ft_search_, FT_SEARCH, _r) +#ifndef FT_SEARCH_PATTERN +#define FT_SEARCH_PATTERN FT_SEARCH_TYPE +#endif + +#define _ft_cmp_def_r(x) \ + FT_CMP_RES (*x)(FT_SEARCH_TYPE a, FT_SEARCH_PATTERN b, ft_arg_t arg) +#define _ft_cmp_def(x) \ + FT_CMP_RES (*x)(FT_SEARCH_TYPE a, FT_SEARCH_PATTERN b) + +ft_inline ft_bsres_t +ft_func_bsearch_r(FT_SEARCH_TYPE *arr, size_t len, FT_SEARCH_PATTERN pat, + _ft_cmp_def_r(cmp), ft_arg_t arg) { + ft_bsres_t res = {len, false}; + size_t l, r, m; + int cmpres; + l = 0; + r = len; + while (l < r) { + m = l + (r - l) / 2; + cmpres = cmp(arr[m], pat, arg); + if (cmpres >= FT_CMP_EQ) { + r = m; + res.eq = cmpres == FT_CMP_EQ; + } else { + l = m + 1; + } + } + res.ix = l; + return res; +} + +ft_inline ft_bsres_t +ft_func_bsearch(FT_SEARCH_TYPE *arr, size_t len, FT_SEARCH_PATTERN pat, + _ft_cmp_def(cmp)) { + return ft_func_bsearch_r(arr, len, pat, (_ft_cmp_def_r()) cmp, ft_mka_z()); +} + +ft_inline size_t +ft_func_search_r(FT_SEARCH_TYPE *arr, size_t len, FT_SEARCH_PATTERN pat, + _ft_cmp_def_r(cmp), ft_arg_t arg) { + size_t i; + for (i = 0; i < len; i++) { + if (cmp(arr[i], pat, arg) == FT_CMP_EQ) + break; + } + return i; +} + +ft_inline size_t +ft_func_search(FT_SEARCH_TYPE *arr, size_t len, FT_SEARCH_PATTERN pat, + _ft_cmp_def(cmp)) { + return ft_func_search_r(arr, len, pat, (_ft_cmp_def_r()) cmp, ft_mka_z()); +} + +#undef FT_SEARCH +#undef FT_SEARCH_TYPE +#undef 
FT_SEARCH_PATTERN +#ifdef FT_SEARCH_ARG +#undef FT_SEARCH_ARG +#endif +#undef ft_func_bsearch +#undef ft_func_bsearch_r +#undef ft_func_search +#undef ft_func_search_r +#undef _ft_cmp_def_r +#undef _ft_cmp_def diff --git a/src/fu_util/ft_sort.inc.h b/src/fu_util/ft_sort.inc.h new file mode 100644 index 000000000..a4eb22d2d --- /dev/null +++ b/src/fu_util/ft_sort.inc.h @@ -0,0 +1,174 @@ +/* + * Sort template. + * Accepts three macrosses: + * - FT_SORT - suffix for functions + * - FT_SORT_TYPE - type of array element + * - FT_SORT_ARG - optionally - argument to comparison function. + * + * Produces: + * + * - shell sort function + * void ft_shsort_FT_SORT(FT_SORT_TYPE *arr, size_t len, + * int (*cmp)(FT_SORT_TYPE a, FT_SORT_TYPE b)) + * void ft_shsort_FT_SORT_r(FT_SORT_TYPE *arr, size_t len, + * int (*cmp)(FT_SORT_TYPE a, FT_SORT_TYPE b, ft_arg_t arg), + * ft_arg_t arg) + * + * - quick sort function + * void ft_qsort_FT_SORT(FT_SORT_TYPE *arr, size_t len, + * int (*cmp)(FT_SORT_TYPE a, FT_SORT_TYPE b)) + * void ft_qsort_FT_SORT_r(FT_SORT_TYPE *arr, size_t len, + * int (*cmp)(FT_SORT_TYPE a, FT_SORT_TYPE b, ft_arg_t arg), + * ft_arg_t arg) + */ + +#include + +#ifndef FT_SORT +#error "FT_SORT should be defined" +#endif + +#ifndef FT_SORT_TYPE +#error "FT_SORT_TYPE should be defined" +#endif + +#define ft_func_shsort fm_cat(ft_shsort_, FT_SORT) +#define ft_func_shsort_r fm_cat3(ft_shsort_, FT_SORT, _r) +#define ft_func_qsort fm_cat(ft_qsort_, FT_SORT) +#define ft_func_qsort_r fm_cat3(ft_qsort_, FT_SORT, _r) + +#define _ft_cmp_def_r(x) int (*x)(FT_SORT_TYPE a, FT_SORT_TYPE b, ft_arg_t arg) +#define _ft_cmp_def(x) int (*x)(FT_SORT_TYPE a, FT_SORT_TYPE b) + +ft_inline ft_optimize3 void +ft_func_shsort_r(FT_SORT_TYPE *arr, size_t len, _ft_cmp_def_r(cmp), ft_arg_t arg) { + FT_SORT_TYPE el; + size_t m, n, d; + ft_dbg_assert((ssize_t)len >= 0); + if (len < 2) {} + else if (len == 2) { + if (cmp(arr[1], arr[0], arg) < 0) { + ft_swap(&arr[1], &arr[0]); + } + } else { + d = 
(size_t)(len / 1.4142135) | 1; + for (;;) { + for (m = d; m < len; m++) { + n = m; + el = arr[n]; + for (; n >= d && cmp(el, arr[n - d], arg) < 0; n -= d) { + arr[n] = arr[n-d]; + } + arr[n] = el; + } + if (d == 1) break; + else if (d < 10) d = 1; + else if (d <= 24) d = (size_t)(d / 2.221); + else d = (size_t)(d / 2.7182818) | 1; + } + } +} + +ft_inline ft_optimize3 void +ft_func_shsort(FT_SORT_TYPE *arr, size_t len, _ft_cmp_def(cmp)) { + ft_func_shsort_r(arr, len, (_ft_cmp_def_r()) cmp, ft_mka_z()); +} + +ft_inline ft_optimize3 void +ft_func_qsort_r(FT_SORT_TYPE *arr_, size_t len_, _ft_cmp_def_r(cmp), ft_arg_t arg) { + FT_SORT_TYPE *arr = arr_; + FT_SORT_TYPE pivot; + size_t len = len_; + size_t m, n, mid[5]; +#define STSZ 32 + int const stsz = STSZ; + int sttop = 0; + struct { FT_SORT_TYPE *ar; size_t ln; } stack[STSZ]; +#undef STSZ + + ft_dbg_assert((ssize_t)len >= 0); + + stack[sttop].ar = arr; stack[sttop].ln = len; sttop++; + while (sttop > 0) { + sttop--; + arr = stack[sttop].ar; len = stack[sttop].ln; + /* check for fallback to shell sort */ + if (len < 24 || (sttop == stsz-1)) { + ft_func_shsort_r(arr, len, cmp, arg); + continue; + } + else { + m = 1; + while (m < len && cmp(arr[m-1], arr[m], arg) < 0) m++; + if (m == len) + continue; + } + /* find a pivot as median of 5 */ + mid[0] = 0; + mid[2] = len/2; + mid[1] = 1 + ft_randn(mid[2]-2); + mid[3] = mid[2] + 1 + ft_randn(mid[2]-2); + mid[4] = len-1; + /* fast median of 5 */ + { + static int const ix[] = {0, 1, 3, 4, 0, 3, 1, 4, 1, 2, 2, 3, 1, 2}; + for (int i = 0; i < ft_arrsz(ix); i += 2) { + if (cmp(arr[mid[ix[i]]], arr[mid[ix[i+1]]], arg) < 0) + ft_swap(&mid[ix[i]], &mid[ix[i+1]]); + } + } + /* make a[i] <= a[l] if i < m */ + pivot = arr[mid[2]]; + m = 0; n = len; + for (;;) { + while (m < n && cmp(pivot, arr[m], arg) >= 0) m++; + while (m < n && cmp(pivot, arr[n-1], arg) < 0) n--; + if (m == n) break; + ft_swap(&arr[m], &arr[n-1]); + m++; n--; + } + if (m < len) { + /* lgr - left greater */ + bool 
lgr = m > len - m; + stack[sttop+(1-lgr)].ar = arr; + stack[sttop+(1-lgr)].ln = m; + stack[sttop+lgr].ar = arr + m; + stack[sttop+lgr].ln = len - m; + sttop += 2; + } else { + /* all <= pivot */ + /* make a[i] < a[l] if i < m*/ + ft_dbg_assert(n == len); + for (;m > 0 && cmp(arr[m-1], pivot, arg) >= 0; m--); + n = m; + for (;m > 0;m--) { + if (cmp(arr[m-1], pivot, arg) >= 0) { + if (m < n) { + ft_swap(&arr[m-1], &arr[n-1]); + } + n--; + } + } + if (n > 0) { + stack[sttop].ar = arr; stack[sttop].ln = n; + sttop++; + } + } + } +} + +ft_inline ft_optimize3 void +ft_func_qsort(FT_SORT_TYPE *arr, size_t len, _ft_cmp_def(cmp)) { + ft_func_qsort_r(arr, len, (_ft_cmp_def_r()) cmp, ft_mka_z()); +} + + +#undef FT_SORT +#undef FT_SORT_TYPE +#undef ft_func_shsort +#undef ft_func_shsort_r +#undef ft_func_qsort +#undef ft_func_qsort_r +#undef _ft_cmp_def_r +#undef _ft_cmp_def + diff --git a/src/fu_util/ft_ss_examples.h b/src/fu_util/ft_ss_examples.h new file mode 100644 index 000000000..a44c38856 --- /dev/null +++ b/src/fu_util/ft_ss_examples.h @@ -0,0 +1,131 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#ifndef FT_SS_EXAMPLES_H +#define FT_SS_EXAMPLES_H + +/* + * Sort for integers. + * Defines: + * void + * ft_shsort_int (int *arr, size_t len, int (*cmp)(int, int)); + * void + * ft_qsort_int (int *arr, size_t len, int (*cmp)(int, int)); + * void + * ft_shsort_int_r(int *arr, size_t len, + * int (*cmp)(int, int, ft_arg_t), + * ft_arg_t); + * void + * ft_qsort_int_r (int *arr, size_t len, + * int (*cmp)(int, int, ft_arg_t), + * ft_arg_t); + */ +#define FT_SORT int +#define FT_SORT_TYPE int +#include +ft_inline FT_CMP_RES ft_int_cmp(int a, int b) { return ft_cmp(a, b); } + +/* + * Sort for strings. 
+ * Defines: + * void + * ft_shsort_cstr (const char **arr, size_t len, + * int (*cmp)(const char*, const char*)); + * void + * ft_qsort_cstr (const char **arr, size_t len, + * int (*cmp)(const char*, const char*, ft_arg_t), + * ft_arg_t); + * void + * ft_shsort_cstr_r(const char **arr, size_t len, + * int (*cmp)(const char*, const char*)); + * void + * ft_qsort_cstr_r (const char **arr, size_t len, + * int (*cmp)(const char*, const char*, ft_arg_t), + * ft_arg_t); + */ +#define FT_SORT cstr +#define FT_SORT_TYPE const char* +#include +/* + * While we could pass raw strcmp to sort and search functions, + * lets define wrapper for clarity + */ +ft_inline FT_CMP_RES ft_cstr_cmp(const char *a, const char *b) { + return ft_cmp(strcmp(a, b), 0); +} + +/* + * Sort for void*. + * Defines: + * void + * ft_shsort_void (void **arr, size_t len, + * int (*cmp)(void*, void*)); + * void + * ft_qsort_void (void **arr, size_t len, + * int (*cmp)(void*, void*, ft_arg_t), + * ft_arg_t); + * void + * ft_shsort_void_r(void **arr, size_t len, + * int (*cmp)(void*, void*)); + * void + * ft_qsort_void_r (void **arr, size_t len, + * int (*cmp)(void*, void*, ft_arg_t), + * ft_arg_t); + */ +#define FT_SORT void +#define FT_SORT_TYPE void* +#include + +/* + * Search for integers. + * Defines: + * ft_bsres_t + * ft_bsearch_int (int *arr, size_t len, + * int (*cmp)(int, int)) + * ft_bsres_t + * ft_bsearch_int_r(int *arr, size_t len, + * int (*cmp)(int, int, ft_arg_t), + * ft_arg_t) + * size_t + * ft_qsort_int (int *arr, size_t len, + * bool (*eq)(int, int)) + * ft_qsort_int_r(int *arr, size_t len, + * bool (*eq)(int, int, ft_arg_t), + * ft_arg_t) + */ +#define FT_SEARCH int +#define FT_SEARCH_TYPE int +#include + +/* + * Search for strings. 
+ * Defines: + * ft_bsres_t + * ft_bsearch_cstr (const char **arr, size_t len, + * int (*cmp)(const char*, const char*)) + * ft_bsres_t + * ft_bsearch_cstr_r(const char **arr, size_t len, + * int (*cmp)(const char*, const char*, ft_arg_t), + * ft_arg_t) + * size_t + * ft_qsort_cstr (const char **arr, size_t len, + * int (*eq)(const char*, const char*)) + * ft_qsort_cstr_r(const char **arr, size_t len, + * int (*eq)(const char*, const char*, ft_arg_t), + * ft_arg_t) + */ +#define FT_SEARCH cstr +#define FT_SEARCH_TYPE const char* +#include + +/* + * Search for void*. + * Defines: + * ft_bsres_t ft_bsearch_void(void **arr, size_t len, + * int (*cmp)(void*, void*)) + * ft_bsres_t ft_search_void(void **arr, size_t len, + * bool (*eq)(void*, void*)) + */ +#define FT_SEARCH void +#define FT_SEARCH_TYPE void* +#include + +#endif /* FT_SS_EXAMPLES_H */ diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h new file mode 100644 index 000000000..d9eaeb881 --- /dev/null +++ b/src/fu_util/ft_util.h @@ -0,0 +1,441 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#ifndef FU_UTIL_H +#define FU_UTIL_H + +#include +#include +#include +#include +#include +/* trick to find ssize_t even on windows and strict ansi mode */ +#if defined(_MSC_VER) +#include +typedef SSIZE_T ssize_t; +#else +#include +#endif +#include +#include +#include +#include + + +#ifdef __GNUC__ +#define ft_gcc_const __attribute__((const)) +#define ft_gcc_pure __attribute__((pure)) +#if __GNUC__ > 10 +#define ft_gcc_malloc(free, idx) __attribute__((malloc, malloc(free, idx))) +#else +#define ft_gcc_malloc(free, idx) __attribute__((malloc)) +#endif +#define ft_unused __attribute__((unused)) +#define ft_gnu_printf(fmt, arg) __attribute__((format(printf,fmt,arg))) +#define ft_likely(x) __builtin_expect(!!(x), 1) +#define ft_unlikely(x) __builtin_expect(!!(x), 0) +#else +#define ft_gcc_const +#define ft_gcc_pure +#define ft_gcc_malloc(free, idx) +#define ft_unused +#define ft_gnu_printf(fmt, arg) 
+#define ft_likely(x) (x) +#define ft_unlikely(x) (x) +#endif +#define ft_inline static ft_unused inline + +#if defined(__GNUC__) && !defined(__clang__) +#define ft_optimize3 __attribute__((optimize(3))) +#else +#define ft_optimize3 +#endif + +#if __STDC_VERSION__ >= 201112L +#elif defined(__GNUC__) && !defined(_Noreturn) +#define _Noreturn __attribute__((__noreturn__)) +#elif !defined(_Noreturn) +#define _Noreturn +#endif + +/* Logging and asserts */ + +#if defined(__GNUC__) && !defined(__clang__) +#define ft_FUNC __PRETTY_FUNCTION__ +#else +#define ft_FUNC __func__ +#endif + +typedef struct ft_source_position { + const char *file; + int line; + const char *func; +} ft_source_position_t; + +#define ft__srcpos() ((ft_source_position_t){.file=__FILE__,.line=__LINE__,.func=ft_FUNC}) + +enum FT_LOG_LEVEL { + FT_UNINITIALIZED = -100, + FT_DEBUG = -2, + FT_LOG = -1, + FT_INFO = 0, + FT_WARNING = 1, + FT_ERROR = 2, + FT_OFF = 3, + FT_FATAL = 98, + FT_TRACE = 100 /* for active debugging only */ +}; + +enum FT_ASSERT_LEVEL { FT_ASSERT_RUNTIME = 0, FT_ASSERT_ALL }; + +ft_inline const char* ft_log_level_str(enum FT_LOG_LEVEL level); + +/* + * Hook type to plug in external logging. + * Default logging writes to stderr only. + */ +typedef void ft_gnu_printf(4, 0) (*ft_log_hook_t)(enum FT_LOG_LEVEL, + ft_source_position_t srcpos, + const char* error, + const char *fmt, + va_list args); +/* + * Initialize logging in main executable file. + * Pass custom hook or NULL.
+ */ +#define ft_init_log(hook) ft__init_log(hook, __FILE__) + +/* Reset log level for all files */ +extern void ft_log_level_reset(enum FT_LOG_LEVEL level); +extern void ft_assert_level_reset(enum FT_ASSERT_LEVEL level); +/* Adjust log level for concrete file or all files */ +extern void ft_log_level_set(const char *file, enum FT_LOG_LEVEL level); +extern void ft_assert_level_set(const char *file, enum FT_ASSERT_LEVEL level); + +/* truncates filename to source root */ +const char* ft__truncate_log_filename(const char *file); + +/* register source for fine tuned logging */ +#define ft_register_source() ft__register_source_impl() + +/* log simple message */ +#define ft_log(level, fmt_or_msg, ...) \ + ft__log_impl(level, NULL, fmt_or_msg, __VA_ARGS__) +/* log message with error. Error will be appended as ": %s". */ +#define ft_logerr(level, error, fmt_or_msg, ...) \ + ft__log_impl(level, error, fmt_or_msg, __VA_ARGS__) + +/* + * Assertions use standard logging for output. + * Assertions are runtime enabled: + * - ft_assert is enabled always. + * - ft_dbg_assert is disabled by default, but will be enabled if `ft_assert_level` is set positive. + */ + +#define ft_dbg_enabled() ft__dbg_enabled() +#define ft_dbg_assert(x, ...) ft__dbg_assert(x, #x, __VA_ARGS__) +#define ft_assert(x, ...) ft__assert(x, #x, __VA_ARGS__) +#define ft_assyscall(syscall, ...) ft__assyscall(syscall, fm_uniq(res), __VA_ARGS__) + +/* threadsafe strerror */ +extern const char* ft__strerror(int eno, char *buf, size_t len); +#ifndef __TINYC__ +extern const char* ft_strerror(int eno); +#else +#define ft_strerror(eno) ft__strerror(eno, (char[256]){0}, 256) +#endif + +// Memory + +// Standardize realloc(p, 0) +// Realloc implementations differ in handling newsz == 0 case: +// some return NULL, some return a unique allocation. +// This version always returns NULL.
+extern void* ft_realloc(void* ptr, size_t new_sz); +extern void* ft_calloc(size_t sz); +extern void* ft_realloc_arr(void* ptr, size_t elem_sz, size_t old_elems, size_t new_elems); + +#define ft_malloc(sz) ft_realloc(NULL, (sz)) +#define ft_malloc_arr(sz, cnt) ft_realloc(NULL, ft_mul_size((sz), (cnt))) +#define ft_free(ptr) ft_realloc((ptr), 0) +#define ft_calloc_arr(sz, cnt) ft_calloc(ft_mul_size((sz), (cnt))) + +extern void ft_set_allocators( + void *(*_realloc)(void *, size_t), + void (*_free)(void*)); + +/* overflow checking size addition and multiplication */ +ft_inline size_t ft_add_size(size_t a, size_t b); +ft_inline size_t ft_mul_size(size_t a, size_t b); + +#define ft_new(type) ft_calloc(sizeof(type)) +#define ft_newar(type, cnt) ft_calloc(ft_mul_size(sizeof(type), (cnt))) + +// Function to clear freshly allocated memory +extern void ft_memzero(void* ptr, size_t sz); + +// Comparison + +/* ft_max - macro-safe calculation of maximum */ +#define ft_max(a_, b_) ft__max((a_), (b_), fm_uniq(a), fm_uniq(b)) +/* ft_min - macro-safe calculation of minimum */ +#define ft_min(a_, b_) ft__min((a_), (b_), fm_uniq(a), fm_uniq(b)) + +/* Well, it is a bit fake enum. 
*/ +typedef enum FT_CMP_RES { + FT_CMP_LT = -1, + FT_CMP_EQ = 0, + FT_CMP_GT = 1, + FT_CMP_NE = 2, +} FT_CMP_RES; +/* ft_cmp - macro-safe comparison */ +#define ft_cmp(a_, b_) ft__cmp((a_), (b_), fm_uniq(a), fm_uniq(b)) +/* ft_swap - macro-safe swap of variables */ +#define ft_swap(a_, b_) ft__swap((a_), (b_), fm_uniq(ap), fm_uniq(bp), fm_uniq(t)) + +/* ft_arrsz - determine size of a static array */ +#define ft_arrsz(ar) (sizeof(ar)/sizeof(ar[0])) + +/* used in ft_*_foreach iterations to close implicit scope */ +#define ft_end_foreach } while(0) + +// Some Numeric Utils + +ft_inline uint32_t ft_rol32(uint32_t x, unsigned n); +ft_inline uint32_t ft_ror32(uint32_t x, unsigned n); +ft_inline size_t ft_nextpow2(size_t sz); + +/* + * Simple inline murmur hash implementation hashing a 32 bit integer, for + * performance. + */ +ft_inline uint32_t ft_mix32(uint32_t data); + + +/* Dumb quality random */ +extern uint32_t ft_rand(void); +/* Dumb quality random 0<=ralloced = false` and you will duplicate string in your own way. + */ +ft_inline ft_str_t ft_strbuf_ref(ft_strbuf_t *buf); + +/* + * Free buffer's buffer, if it was allocated + */ +ft_inline void ft_strbuf_free(ft_strbuf_t *buf); + +/* + * Always return allocated string. + * If buffer wasn't empty, returns its ptr intact. + * If buffer was empty, allocates a 1-byte string with zero end + * Meaningless with fixed non-allocated buffer if you don't want to allocate. + * + * Buffer fields are cleared, therefore it will be unusable after. + * You will have to initialize it again.
+ */ +ft_inline ft_str_t ft_strbuf_steal(ft_strbuf_t *buf); + +#include "./impl/ft_impl.h" + +/* Include some examples for search and sort usages */ +//#include "./ft_ss_examples.h" +//#include "./ft_ar_examples.h" + +#endif diff --git a/src/fu_util/fu_utils_cfg.h b/src/fu_util/fu_utils_cfg.h new file mode 100644 index 000000000..e9b0ec5ca --- /dev/null +++ b/src/fu_util/fu_utils_cfg.h @@ -0,0 +1,9 @@ +#ifndef FU_UTILS_FU_UTILS_CFG_H +#define FU_UTILS_FU_UTILS_CFG_H + +#define FU_UTILS_VERSION_MAJOR 0 +#define FU_UTILS_VERSION_MINOR 1 + +//#undef HAVE_LIBBACKTRACE + +#endif //FU_UTILS_FU_UTILS_CFG_H diff --git a/src/fu_util/fu_utils_cfg.h.in b/src/fu_util/fu_utils_cfg.h.in new file mode 100644 index 000000000..f5b3c30af --- /dev/null +++ b/src/fu_util/fu_utils_cfg.h.in @@ -0,0 +1,9 @@ +#ifndef FU_UTILS_FU_UTILS_H_IN +#define FU_UTILS_FU_UTILS_H_IN + +#define FU_UTILS_VERSION_MAJOR @fu_utils_VERSION_MAJOR@ +#define FU_UTILS_VERSION_MINOR @fu_utils_VERSION_MINOR@ + +//#cmakedefine HAVE_LIBBACKTRACE + +#endif diff --git a/src/fu_util/impl/fo_impl.c b/src/fu_util/impl/fo_impl.c new file mode 100644 index 000000000..89dacc423 --- /dev/null +++ b/src/fu_util/impl/fo_impl.c @@ -0,0 +1,1389 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#include +#include +#include +#include +#include + +#ifdef WIN32 +#define __thread __declspec(thread) +#endif +#include + +#include +#include + +/* + * We limits total number of methods, klasses and method implementations. + * Restricted number allows to use uint16_t for id and doesn't bother with + * smart structures for hashes. + * If you need more, you have to change the way they are stored. 
+ */ +#define FOBJ_OBJ_MAX_KLASSES (1<<10) +#define FOBJ_OBJ_MAX_METHODS (1<<10) +#define FOBJ_OBJ_MAX_METHOD_IMPLS (1<<15) + +enum { FOBJ_DISPOSING = 1, FOBJ_DISPOSED = 2 }; + +typedef enum { + FOBJ_RT_NOT_INITIALIZED, + FOBJ_RT_INITIALIZED, + FOBJ_RT_FROZEN +} FOBJ_GLOBAL_STATE; + +typedef struct fobj_header { +#ifndef NDEBUG +#define FOBJ_HEADER_MAGIC UINT64_C(0x1234567890abcdef) + uint64_t magic; +#endif + volatile uint32_t rc; + volatile uint16_t flags; + fobj_klass_handle_t klass; +} fobj_header_t; + +#define METHOD_PARTITIONS (16) + +typedef struct fobj_klass_registration { + const char *name; + uint32_t hash; + uint32_t hash_next; + + ssize_t size; + fobj_klass_handle_t parent; + + uint32_t nmethods; + + /* common methods */ + fobj__nm_impl_t(fobjDispose) dispose; + + volatile uint16_t method_lists[METHOD_PARTITIONS]; +} fobj_klass_registration_t; + +typedef struct fobj_method_registration { + const char *name; + uint32_t hash; + uint32_t hash_next; + + uint32_t nklasses; + volatile uint32_t first; +} fobj_method_registration_t; + +typedef struct fobj_method_impl { + uint16_t method; + uint16_t klass; + uint16_t next_for_method; + uint16_t next_for_klass; + void* impl; +} fobj_method_impl_t; + + +static fobj_klass_registration_t fobj_klasses[1<<10] = {{0}}; +static fobj_method_registration_t fobj_methods[1<<10] = {{0}}; +#define FOBJ_OBJ_HASH_SIZE (FOBJ_OBJ_MAX_METHODS/4) +static volatile uint16_t fobj_klasses_hash[FOBJ_OBJ_HASH_SIZE] = {0}; +static volatile uint16_t fobj_methods_hash[FOBJ_OBJ_HASH_SIZE] = {0}; +static fobj_method_impl_t fobj_method_impl[FOBJ_OBJ_MAX_METHOD_IMPLS] = {{0}}; +static volatile uint32_t fobj_klasses_n = 0; +static volatile uint32_t fobj_methods_n = 0; +static volatile uint32_t fobj_impls_n = 0; + +static fobj_t fobj_autorelease(fobj_t obj, fobj_autorelease_pool *pool); +static void fobj_release(fobj_t self); +static fobj_autorelease_pool** fobj_AR_current_ptr(void); + +static pthread_mutex_t fobj_runtime_mutex = 
PTHREAD_MUTEX_INITIALIZER; +static volatile uint32_t fobj_global_state = FOBJ_RT_NOT_INITIALIZED; + +#define pth_assert(...) do { \ + int rc = __VA_ARGS__; \ + ft_assert(!rc, "fobj_runtime_mutex: %s", ft_strerror(rc)); \ +} while(0) + +#define atload(v) __atomic_load_n((v), __ATOMIC_ACQUIRE) + +bool +fobj_method_init_impl(volatile fobj_method_handle_t *meth, const char *name) { + uint32_t hash, mh; + fobj_method_registration_t *reg; + + ft_dbg_assert(meth); + + pth_assert(pthread_mutex_lock(&fobj_runtime_mutex)); + if ((mh = *meth) != 0) { + reg = &fobj_methods[mh]; + pth_assert(pthread_mutex_unlock(&fobj_runtime_mutex)); + ft_assert(mh <= atload(&fobj_methods_n)); + ft_assert(strcmp(reg->name, name) == 0); + return true; + } + + + hash = ft_small_cstr_hash(name); + mh = fobj_methods_hash[hash % FOBJ_OBJ_HASH_SIZE]; + for (; mh != 0; mh = reg->hash_next) { + reg = &fobj_methods[mh]; + if (reg->hash == hash && strcmp(reg->name, name) == 0) { + __atomic_store_n(meth, mh, __ATOMIC_RELEASE); + pth_assert(pthread_mutex_unlock(&fobj_runtime_mutex)); + return true; + } + } + + ft_assert(fobj_global_state == FOBJ_RT_INITIALIZED); + + mh = fobj_methods_n + 1; + ft_dbg_assert(mh > 0); + ft_assert(*meth < FOBJ_OBJ_MAX_METHODS, "Too many methods defined"); + reg = &fobj_methods[mh]; + reg->name = name; + reg->hash = hash; + reg->hash_next = fobj_methods_hash[hash % FOBJ_OBJ_HASH_SIZE]; + fobj_methods_hash[hash % FOBJ_OBJ_HASH_SIZE] = mh; + + __atomic_store_n(&fobj_methods_n, mh, __ATOMIC_RELEASE); + __atomic_store_n(meth, mh, __ATOMIC_RELEASE); + + pth_assert(pthread_mutex_unlock(&fobj_runtime_mutex)); + + return false; +} + +static inline void* +fobj_search_impl(fobj_method_handle_t meth, fobj_klass_handle_t klass) { + uint32_t i; + + i = atload(&fobj_klasses[klass].method_lists[meth%METHOD_PARTITIONS]); + do { + if (fobj_method_impl[i].method == meth) + return fobj_method_impl[i].impl; + i = fobj_method_impl[i].next_for_klass; + } while (i != 0); + + return NULL; +} + +void* 
+fobj_klass_method_search(fobj_klass_handle_t klass, fobj_method_handle_t meth) { + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + ft_dbg_assert(meth > 0 && meth <= atload(&fobj_methods_n)); + ft_dbg_assert(meth != fobj__nm_mhandle(fobjDispose)()); + ft_dbg_assert(klass > 0 && klass <= atload(&fobj_klasses_n)); + + do { + void *impl = fobj_search_impl(meth, klass); + if (impl) + return impl; + klass = fobj_klasses[klass].parent; + } while (klass != 0); + return NULL; +} + + +fobj__method_callback_t +fobj_method_search(const fobj_t self, fobj_method_handle_t meth, fobj_klass_handle_t for_child) { + fobj_header_t *h; + fobj_klass_handle_t klass; + fobj__method_callback_t cb = {self, NULL}; + + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + if (ft_dbg_enabled()) { + ft_assert(meth > 0 && meth <= atload(&fobj_methods_n)); + ft_assert(meth != fobj__nm_mhandle(fobjDispose)()); + } + + h = ((fobj_header_t*)self - 1); + assert(h->magic == FOBJ_HEADER_MAGIC); + klass = h->klass; + ft_dbg_assert(klass > 0 && klass <= atload(&fobj_klasses_n)); + + if (ft_unlikely(for_child != 0)) { + if (ft_unlikely(ft_dbg_enabled())) { + while (klass && klass != for_child) { + klass = fobj_klasses[klass].parent; + } + ft_assert(klass == for_child); + } else { + klass = for_child; + } + klass = fobj_klasses[klass].parent; + } + + do { + cb.impl = fobj_search_impl(meth, klass); + if (cb.impl != NULL) + return cb; + + klass = fobj_klasses[klass].parent; + } while (klass); + cb.self = NULL; + return cb; +} + +bool +fobj_method_implements(const fobj_t self, fobj_method_handle_t meth) { + fobj_header_t *h; + fobj_klass_handle_t klass; + + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + if (ft_dbg_enabled()) { + ft_assert(meth > 0 && meth <= atload(&fobj_methods_n)); + ft_assert(meth != fobj__nm_mhandle(fobjDispose)()); + } + + h = ((fobj_header_t*)self - 1); + assert(h->magic == FOBJ_HEADER_MAGIC); + klass = h->klass; + ft_dbg_assert(klass > 0 && klass <= 
atload(&fobj_klasses_n)); + + do { + if (fobj_search_impl(meth, klass) != NULL) + return true; + + klass = fobj_klasses[klass].parent; + } while (klass); + return false; +} + +const char * +fobj_klass_name(fobj_klass_handle_t klass) { + fobj_klass_registration_t *reg; + + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + ft_dbg_assert(klass && klass <= atload(&fobj_klasses_n)); + + reg = &fobj_klasses[klass]; + + return reg->name; +} + +fobj_klass_handle_t +fobj_real_klass_of(fobj_t self) { + fobj_header_t *h; + + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + ft_assert(self != NULL); + + h = ((fobj_header_t*)self - 1); + assert(h->magic == FOBJ_HEADER_MAGIC); + return h->klass; +} + +static void fobj_method_register_priv(fobj_klass_handle_t klass, + fobj_method_handle_t meth, + void* impl); + +bool +fobj_klass_init_impl(volatile fobj_klass_handle_t *klass, + ssize_t size, + fobj_klass_handle_t parent, + fobj__method_impl_box_t *methods, + const char *name) { + uint32_t hash, kl; + fobj_klass_registration_t *reg; + + ft_assert(fobj_global_state == FOBJ_RT_INITIALIZED); + ft_dbg_assert(klass); + + pth_assert(pthread_mutex_lock(&fobj_runtime_mutex)); + + if ((kl = *klass) != 0) { + reg = &fobj_klasses[kl]; + pth_assert(pthread_mutex_unlock(&fobj_runtime_mutex)); + ft_assert(kl <= atload(&fobj_klasses_n)); + ft_assert(strcmp(reg->name, name) == 0); + ft_assert(reg->size == size); + ft_assert(reg->parent == parent); + return true; + } + + hash = ft_small_cstr_hash(name); + kl = fobj_klasses_hash[hash % FOBJ_OBJ_HASH_SIZE]; + for (; kl != 0; kl = reg->hash_next) { + reg = &fobj_klasses[kl]; + if (reg->hash == hash && strcmp(reg->name, name) == 0) { + __atomic_store_n(klass, kl, __ATOMIC_RELEASE); + pth_assert(pthread_mutex_unlock(&fobj_runtime_mutex)); + ft_assert(reg->size == size); + ft_assert(reg->parent == parent); + return true; + } + } + + kl = fobj_klasses_n + 1; + ft_dbg_assert(kl > 0); + ft_assert(*klass < FOBJ_OBJ_MAX_KLASSES, "Too many 
klasses defined"); + reg = &fobj_klasses[kl]; + reg->size = size; + reg->name = name; + reg->parent = parent; + reg->hash = hash; + reg->hash_next = fobj_klasses_hash[hash % FOBJ_OBJ_HASH_SIZE]; + fobj_klasses_hash[hash % FOBJ_OBJ_HASH_SIZE] = kl; + + __atomic_store_n(&fobj_klasses_n, kl, __ATOMIC_RELEASE); + /* declare methods before store klass */ + while (methods->meth != 0) { + fobj_method_register_priv(kl, methods->meth, methods->impl); + methods++; + } + + __atomic_store_n(klass, kl, __ATOMIC_RELEASE); + + pth_assert(pthread_mutex_unlock(&fobj_runtime_mutex)); + + return false; +} + +static void +fobj_method_register_priv(fobj_klass_handle_t klass, fobj_method_handle_t meth, void* impl) { + fobj_method_registration_t *mreg; + fobj_klass_registration_t *kreg; + void *existed; + uint32_t nom; + + mreg = &fobj_methods[meth]; + kreg = &fobj_klasses[klass]; + + existed = fobj_search_impl(meth, klass); + ft_dbg_assert(existed == NULL || existed == impl, + "Method %s.%s is redeclared with different implementation", + kreg->name, mreg->name); + + if (existed == impl) { + return; + } + + nom = fobj_impls_n + 1; + ft_assert(nom < FOBJ_OBJ_MAX_METHOD_IMPLS); + fobj_method_impl[nom].method = meth; + fobj_method_impl[nom].klass = klass; + fobj_method_impl[nom].next_for_method = mreg->first; + fobj_method_impl[nom].next_for_klass = kreg->method_lists[meth%METHOD_PARTITIONS]; + fobj_method_impl[nom].impl = impl; + __atomic_store_n(&mreg->first, nom, __ATOMIC_RELEASE); + __atomic_store_n(&kreg->method_lists[meth%METHOD_PARTITIONS], nom, + __ATOMIC_RELEASE); + + if (meth == fobj__nm_mhandle(fobjDispose)()) + kreg->dispose = (fobj__nm_impl_t(fobjDispose)) impl; + + __atomic_store_n(&fobj_impls_n, nom, __ATOMIC_RELEASE); +} + +void +fobj_method_register_impl(fobj_klass_handle_t klass, fobj_method_handle_t meth, void* impl) { + ft_assert(fobj_global_state == FOBJ_RT_INITIALIZED); + ft_dbg_assert(meth > 0 && meth <= atload(&fobj_methods_n)); + ft_dbg_assert(klass > 0 && klass <= 
atload(&fobj_klasses_n)); + + pth_assert(pthread_mutex_lock(&fobj_runtime_mutex)); + + fobj_method_register_priv(klass, meth, impl); + + pth_assert(pthread_mutex_unlock(&fobj_runtime_mutex)); +} + +void* +fobj__allocate(fobj_klass_handle_t klass, void *init, ssize_t size) { + fobj_klass_registration_t *kreg; + fobj_header_t *hdr; + fobj_t self; + ssize_t copy_size; + + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + ft_dbg_assert(klass > 0 && klass <= atload(&fobj_klasses_n)); + + kreg = &fobj_klasses[klass]; + copy_size = kreg->size >= 0 ? kreg->size : -1-kreg->size; + if (size < 0) { + size = copy_size; + } else { + ft_assert(kreg->size < 0); + size += copy_size; + } + hdr = ft_calloc(sizeof(fobj_header_t) + size); +#ifndef NDEBUG + hdr->magic = FOBJ_HEADER_MAGIC; +#endif + hdr->klass = klass; + hdr->rc = 1; + self = (fobj_t)(hdr + 1); + if (init != NULL) + memcpy(self, init, copy_size); + fobj_autorelease(self, *fobj_AR_current_ptr()); + return self; +} + +fobj_t +fobj_ref(fobj_t self) { + fobj_header_t *h; + if (self == NULL) + return NULL; + h = ((fobj_header_t*)self - 1); + assert(h->magic == FOBJ_HEADER_MAGIC); + ft_assert(h->klass > 0 && h->klass <= atload(&fobj_klasses_n)); + __atomic_fetch_add(&h->rc, 1, __ATOMIC_ACQ_REL); + return self; +} + +void +fobj_set(fobj_t *ptr, fobj_t val) { + fobj_t oldval = *ptr; + *ptr = val ? fobj_ref(val) : NULL; + if (oldval) fobj_release(oldval); +} + +fobj_t +fobj_swap(fobj_t *ptr, fobj_t val) { + fobj_t oldval = *ptr; + *ptr = val ? fobj_ref(val) : NULL; + return oldval ? 
fobj_autorelease(oldval, *fobj_AR_current_ptr()) : NULL; +} + +fobj_t +fobj_unref(fobj_t val) { + return fobj_autorelease(val, *fobj_AR_current_ptr()); +} + +static void +fobj__dispose_req(fobj_t self, fobj_klass_registration_t *kreg) { + if (kreg->dispose) + kreg->dispose(self); + if (kreg->parent) { + fobj_klass_registration_t *preg; + + preg = &fobj_klasses[kreg->parent]; + fobj__dispose_req(self, preg); + } +} + +static void +fobj__do_dispose(fobj_t self, fobj_header_t *h, fobj_klass_registration_t *kreg) { + uint32_t old = __atomic_fetch_or(&h->flags, FOBJ_DISPOSING, __ATOMIC_ACQ_REL); + if (old & FOBJ_DISPOSING) + return; + fobj__dispose_req(self, kreg); + __atomic_fetch_or(&h->flags, FOBJ_DISPOSED, __ATOMIC_ACQ_REL); + + if (atload(&h->rc) == 0) + { + *h = (fobj_header_t){0}; + ft_free(h); + } +} + +void +fobj_dispose(fobj_t self) { + fobj_header_t *h; + fobj_klass_handle_t klass; + fobj_klass_registration_t *kreg; + + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + + if (self == NULL) + return; + + h = ((fobj_header_t*)self - 1); + assert(h->magic == FOBJ_HEADER_MAGIC); + klass = h->klass; + ft_dbg_assert(klass > 0 && klass <= atload(&fobj_klasses_n)); + kreg = &fobj_klasses[klass]; + + fobj__do_dispose(self, h, kreg); +} + +static void +fobj_release(fobj_t self) { + fobj_header_t *h; + fobj_klass_handle_t klass; + fobj_klass_registration_t *kreg; + + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + + if (self == NULL) + return; + + h = ((fobj_header_t*)self - 1); + assert(h->magic == FOBJ_HEADER_MAGIC); + klass = h->klass; + ft_dbg_assert(klass > 0 && klass <= atload(&fobj_klasses_n)); + kreg = &fobj_klasses[klass]; + + + if (__atomic_sub_fetch(&h->rc, 1, __ATOMIC_ACQ_REL) != 0) + return; + if ((atload(&h->flags) & FOBJ_DISPOSING) != 0) + return; + fobj__do_dispose(self, h, kreg); +} + +bool +fobj_disposing(fobj_t self) { + fobj_header_t *h; + + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + ft_assert(self != NULL); + + h = 
((fobj_header_t*)self - 1); + assert(h->magic == FOBJ_HEADER_MAGIC); + return (atload(&h->flags) & FOBJ_DISPOSING) != 0; +} + +bool +fobj_disposed(fobj_t self) { + fobj_header_t *h; + + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + ft_assert(self != NULL); + + h = ((fobj_header_t*)self - 1); + assert(h->magic == FOBJ_HEADER_MAGIC); + return (atload(&h->flags) & FOBJ_DISPOSED) != 0; +} + +static fobj_klass_handle_t +fobjBase_fobjKlass(fobj_t self) { + return fobj_real_klass_of(self); +} + +static struct fobjStr* +fobjBase_fobjRepr(VSelf) { + Self(fobjBase); + fobj_klass_handle_t klass = fobjKlass(self); + return fobj_sprintf("%s@%p", fobj_klass_name(klass), self); +} + +err_i +fobj_err_combine(err_i fst, err_i scnd) { + fobjErr* first = (fobjErr*)fst.self; + fobjErr* second = (fobjErr*)scnd.self; + fobjErr **tail; + if (first == NULL) + return scnd; + if (second == NULL) + return fst; + ft_assert(fobj_real_klass_of(first) == fobjErr__kh()); + ft_assert(fobj_real_klass_of(second) == fobjErr__kh()); + if (first->sibling != NULL) { + tail = &second->sibling; + while (*tail != NULL) tail = &(*tail)->sibling; + /* ownership is also transferred */ + *tail = first->sibling; + } + first->sibling = $ref(second); + return fst; +} + +fobjStr* +fobj_newstr(ft_str_t s, bool gifted) { + fobjStr *str; + ft_assert(s.len < UINT32_MAX-2); + if (!gifted) { + str = fobj_alloc_sized(fobjStr, s.len + 1, .len = s.len); + memcpy(str->_buf, s.ptr, s.len); + str->_buf[s.len] = '\0'; + str->ptr = str->_buf; + } else { + str = fobj_alloc(fobjStr, .len = s.len, .ptr = s.ptr); + } + return str; +} + +static void +fobjStr_fobjDispose(VSelf) { + Self(fobjStr); + if (self->ptr != self->_buf) { + ft_free((void*)self->ptr); + } +} + +fobjStr* +fobj_strcat(fobjStr *self, ft_str_t s) { + fobjStr *newstr; + size_t alloc_len = self->len + s.len + 1; + ft_assert(alloc_len < UINT32_MAX-2); + + if (s.len == 0) + return $unref($ref(self)); + + newstr = fobj_alloc_sized(fobjStr, alloc_len, .len = 
alloc_len-1); + memcpy(newstr->_buf, self->ptr, self->len); + memcpy(newstr->_buf + self->len, s.ptr, s.len); + newstr->_buf[newstr->len] = '\0'; + newstr->ptr = newstr->_buf; + return newstr; +} + +fobjStr* +fobj_sprintf(const char *fmt, ...) { + char buffer[256] = {0}; + ft_strbuf_t buf = ft_strbuf_init_stack(buffer, 256); + va_list args; + + va_start(args, fmt); + ft_strbuf_vcatf(&buf, fmt, args); + va_end(args); + + return fobj_strbuf_steal(&buf); +} + +fobjStr* +fobj_strcatf(fobjStr *ostr, const char *fmt, ...) { + ft_strbuf_t buf = ft_strbuf_init_str(fobj_getstr(ostr)); + bool err; + va_list args; + + va_start(args, fmt); + ft_strbuf_vcatf_err(&buf, &err, fmt, args); + va_end(args); + + if (err) { + ft_log(FT_ERROR, "error printing format '%s'", fmt); + return NULL; + } + + /* empty print? */ + if (buf.ptr == ostr->ptr) { + return $unref($ref(ostr)); + } + return fobj_newstr(ft_strbuf_steal(&buf), true); +} + +fobjStr* +fobj_tostr(fobj_t obj, const char *fmt) { + char buffer[32]; + ft_strbuf_t buf = ft_strbuf_init_stack(buffer, 32); + + if (obj == NULL) { + return fobj_str(""); + } + + if (!$ifdef(, fobjFormat, obj, &buf, fmt)) { + /* fallback to Repr */ + return $(fobjRepr, obj); + } + return fobj_strbuf_steal(&buf); +} + +static void +fobj_format_string(ft_strbuf_t *buf, ft_str_t str, const char *fmt) { + int i; + char c; + + if (fmt == NULL || fmt[0] == '\0') { + ft_strbuf_cat(buf, str); + return; + } else if (strcmp(fmt, "q") != 0) { + char realfmt[32] = "%"; + + ft_assert(ft_strlcat(realfmt, fmt, 32) < 32); + ft_strbuf_catf(buf, realfmt, str.ptr); + + return; + } + + /* Ok, we're asked for quoted representation */ + if (str.ptr == NULL) { + ft_strbuf_catc(buf, "NULL"); + } + + ft_strbuf_cat1(buf, '"'); + for (i = 0; i < str.len; i++) { + c = str.ptr[i]; + switch (c) { + case '\"': ft_strbuf_catc(buf, "\\\""); break; + case '\t': ft_strbuf_catc(buf, "\\t"); break; + case '\n': ft_strbuf_catc(buf, "\\n"); break; + case '\r': ft_strbuf_catc(buf, "\\r"); 
break; + case '\a': ft_strbuf_catc(buf, "\\a"); break; + case '\b': ft_strbuf_catc(buf, "\\b"); break; + case '\f': ft_strbuf_catc(buf, "\\f"); break; + case '\v': ft_strbuf_catc(buf, "\\v"); break; + case '\\': ft_strbuf_catc(buf, "\\\\"); break; + default: + if (c < 0x20) { + ft_strbuf_catc(buf, "\\x"); + ft_strbuf_cat2(buf, '0'+(c>>4), ((c&0xf)<=9?'0':'a')+(c&0xf)); + } else { + ft_strbuf_cat1(buf, c); + } + } + } + ft_strbuf_cat1(buf, '"'); +} + +static fobjStr* +fobjStr_fobjRepr(VSelf) { + Self(fobjStr); + char buffer[32] = {0}; + ft_strbuf_t buf = ft_strbuf_init_stack(buffer, 32); + + ft_strbuf_catc(&buf, "$S("); + fobj_format_string(&buf, fobj_getstr(self), "q"); + ft_strbuf_cat1(&buf, ')'); + + return fobj_strbuf_steal(&buf); +} + +static void +fobjStr_fobjFormat(VSelf, ft_strbuf_t *out, const char *fmt) { + Self(fobjStr); + fobj_format_string(out, fobj_getstr(self), fmt); +} + +static fobjStr* +fobjInt_fobjRepr(VSelf) { + Self(fobjInt); + return fobj_sprintf("$I(%"PRIi64")", self->i); +} + +static void +fobj_format_int(ft_strbuf_t *buf, uint64_t i, bool _signed, const char *fmt) { + char tfmt[32] = "%"; + char base; + size_t fmtlen; + + + if (fmt == NULL || fmt[0] == 0) { + if (_signed) { + ft_strbuf_catf(buf, "%"PRIi64, (int64_t)i); + } else { + ft_strbuf_catf(buf, "%"PRIu64, (uint64_t)i); + } + return; + } + + /* need to clean length specifiers ('l', 'll', 'z') */ + fmtlen = ft_strlcat(tfmt, fmt, 32); + ft_assert(fmtlen<28); + base = tfmt[fmtlen-1]; + ft_assert(base=='x' || base=='X' || base=='o' || base=='u' || + base=='d' || base=='i'); + do fmtlen--; + while (tfmt[fmtlen-1] == 'l' || tfmt[fmtlen-1] == 'z'); + tfmt[fmtlen] = '\0'; + + /* now add real suitable format */ + switch (base) { + case 'x': strcat(tfmt + fmtlen, PRIx64); break; + case 'X': strcat(tfmt + fmtlen, PRIX64); break; + case 'o': strcat(tfmt + fmtlen, PRIo64); break; + case 'u': strcat(tfmt + fmtlen, PRIu64); break; + case 'd': strcat(tfmt + fmtlen, PRId64); break; + default: + case 
'i': strcat(tfmt + fmtlen, PRIi64); break; + } + + switch (base) { + case 'd': case 'i': + ft_strbuf_catf(buf, tfmt, (int64_t)i); + break; + default: + ft_strbuf_catf(buf, tfmt, (uint64_t)i); + break; + } +} + +static void +fobjInt_fobjFormat(VSelf, ft_strbuf_t *buf, const char *fmt) { + Self(fobjInt); + fobj_format_int(buf, self->i, true, fmt); +} + +static fobjStr* +fobjUInt_fobjRepr(VSelf) { + Self(fobjUInt); + return fobj_sprintf("$U(%"PRIu64")", self->u); +} + +static void +fobjUInt_fobjFormat(VSelf, ft_strbuf_t *buf, const char *fmt) { + Self(fobjUInt); + fobj_format_int(buf, self->u, false, fmt); +} + +static fobjStr* +fobjFloat_fobjRepr(VSelf) { + Self(fobjFloat); + return fobj_sprintf("%f", self->f); +} + +static void +fobj_format_float(ft_strbuf_t *buf, double f, const char *fmt) { + char tfmt[32] = "%"; + + if (fmt == NULL || fmt[0] == 0) { + ft_strbuf_catf(buf, "%f", f); + return; + } + ft_strlcat(tfmt, fmt, 32); + ft_strbuf_catf(buf, tfmt, f); +} + +static void +fobjFloat_fobjFormat(VSelf, ft_strbuf_t *buf, const char *fmt) { + Self(fobjFloat); + fobj_format_float(buf, self->f, fmt); +} + +static fobjStr* trueRepr = NULL; +static fobjStr* falseRepr = NULL; +static fobjStr* trueStr = NULL; +static fobjStr* falseStr = NULL; + +static fobjStr* +fobjBool_fobjRepr(VSelf) { + Self(fobjBool); + return self->b ? trueRepr : falseRepr; +} + +static void +fobj_format_bool(ft_strbuf_t *buf, bool b, const char *fmt) { + char tfmt[32] = "%"; + size_t fmtlen; + const char *repr = NULL; + + if (fmt == NULL || fmt[0] == 0) { + if (b) + ft_strbuf_catc(buf, "true"); + else + ft_strbuf_catc(buf, "false"); + return; + } + fmtlen = ft_strlcat(tfmt, fmt, 32); + switch (tfmt[fmtlen-1]) { + case 'B': repr = b ? "TRUE" : "FALSE"; break; + case 'b': repr = b ? "true" : "false"; break; + case 'P': repr = b ? "True" : "False"; break; + case 'Y': repr = b ? "Yes" : "No"; break; + case 'y': repr = b ? 
"yes" : "no"; break; + } + if (repr != NULL) { + tfmt[fmtlen-1] = 's'; + ft_strbuf_catf(buf, tfmt, repr); + } else { + ft_strbuf_catf(buf, tfmt, b); + } +} + +static void +fobjBool_fobjFormat(VSelf, ft_strbuf_t *buf, const char *fmt) { + Self(fobjBool); + fobj_format_bool(buf, self->b, fmt); +} + +static void +fobj_format_arg(ft_strbuf_t *out, ft_arg_t arg, const char *fmt) { + switch (ft_arg_type(arg)) { + case 'i': + fobj_format_int(out, (uint64_t)arg.v.i, true, fmt); + break; + case 'u': + fobj_format_int(out, arg.v.i, false, fmt); + break; + case 'f': + fobj_format_float(out, arg.v.f, fmt); + break; + case 's': + fobj_format_string(out, ft_cstr(arg.v.s), fmt); + break; + case 'b': + fobj_format_bool(out, arg.v.b, fmt); + break; + case 'o': + if (arg.v.o == NULL) { + ft_strbuf_catc(out, "(null)"); + } else if (!$ifdef(, fobjFormat, arg.v.o, out, fmt)) { + fobjStr* repr = $(fobjRepr, arg.v.o); + ft_strbuf_cat(out, fobj_getstr(repr)); + } + break; + default: + ft_assert(false, "Could not format arg of type '%c'", ft_arg_type(arg)); + } +} + +static void +fobj_repr_arg(ft_strbuf_t *out, ft_arg_t arg) { + fobjStr* repr; + switch (ft_arg_type(arg)) { + case 'i': + fobj_format_int(out, (uint64_t)arg.v.i, true, "i"); + break; + case 'u': + fobj_format_int(out, arg.v.u, false, NULL); + break; + case 'f': + fobj_format_float(out, arg.v.f, NULL); + break; + case 's': + fobj_format_string(out, ft_cstr(arg.v.s), "q"); + break; + case 'b': + fobj_format_bool(out, arg.v.b, NULL); + break; + case 'o': + if (arg.v.o == NULL) { + ft_strbuf_catc(out, "NULL"); + } else { + repr = $(fobjRepr, arg.v.o); + ft_strbuf_cat(out, fobj_getstr(repr)); + } + break; + default: + ft_assert(false, "Could not represent arg of type '%c'", ft_arg_type(arg)); + } +} + +static const char* +fobj__format_errmsg(const char* msg, fobj_err_kv_t *kvs) { + char buf[128]; + ft_strbuf_t out = ft_strbuf_init_stack(buf, 128); + bool found; + const char* cur; + char* closebrace; + char* formatdelim; + size_t 
identlen; + size_t formatlen; + char ident[32]; + char format[32]; + fobj_err_kv_t* kv; + + if (strchr(msg, '{') == NULL || strchr(msg, '}') == NULL) + return ft_cstrdup(msg); + + for (cur = msg; *cur; cur++) { + if (*cur != '{') { + ft_strbuf_cat1(&out, *cur); + continue; + } + if (cur[1] == '{') { + ft_strbuf_cat1(&out, '{'); + cur++; + continue; + } + cur++; + closebrace = strchr(cur, '}'); + ft_assert(closebrace, "error format string braces unbalanced"); + formatdelim = memchr(cur, ':', closebrace - cur); + identlen = (formatdelim ?: closebrace) - cur; + ft_assert(identlen <= 31, + "ident is too long in message \"%s\"", msg); + ft_assert(formatdelim == NULL || closebrace - formatdelim <= 31, + "format is too long in message \"%s\"", msg); + strncpy(ident, cur, identlen); + ident[identlen] = 0; + formatlen = formatdelim ? closebrace - (formatdelim+1) : 0; + if (formatlen > 0) { + strncpy(format, formatdelim + 1, formatlen); + } + format[formatlen] = 0; + kv = kvs; + found = false; + for (;kv->key != NULL; kv++) { + if (strcmp(kv->key, ident) == 0) { + found = true; + fobj_format_arg(&out, kv->val, format); + break; + } + } + ft_dbg_assert(found, "ident '%s' is not found (message \"%s\")", ident, msg); + cur = closebrace; + } + + return ft_strbuf_steal(&out).ptr; +} + +extern err_i +fobj__make_err(const char *type, + ft_source_position_t src, + const char *msg, + fobj_err_kv_t *kvs, + size_t kvn) { + fobjErr* err; + fobj_err_kv_t* kv; + fobj_err_kv_t* cpy; + ft_strbuf_t nmsg; + + err = fobj_alloc_sized(fobjErr, + ft_mul_size(sizeof(*kvs), kvn+1), + .type = type ?: "RT", + .src = src); + err->src.file = ft__truncate_log_filename(err->src.file); + msg = msg ?: err->type ?: "Unspecified Error"; + nmsg = ft_strbuf_init_str(ft_cstr(msg)); + /* search for suffix */ + if (kvn > 0) { + memcpy(err->kv, kvs, sizeof(*kvs)*kvn); + cpy = err->kv; + for (kv = err->kv; kv->key; kv++) { + if (strcmp(kv->key, "__msgSuffix") == 0) { + ft_strbuf_catc(&nmsg, ft_arg_s(kv->val)); + 
continue; + } + switch (ft_arg_type(kv->val)) { + case 'o': + $ref(ft_arg_o(kv->val)); + break; + case 's': + kv->val.v.s = kv->val.v.s ? ft_cstrdup(kv->val.v.s) : NULL; + break; + } + if (cpy != kv) + *cpy = *kv; + cpy++; + } + if (cpy != kv) + *cpy = (fobj_err_kv_t){NULL, ft_mka_z()}; + } + err->message = fobj__format_errmsg(ft_strbuf_ref(&nmsg).ptr, err->kv); + ft_strbuf_free(&nmsg); + return bind_err(err); +} + +static void +fobjErr_fobjDispose(VSelf) { + Self(fobjErr); + fobj_err_kv_t *kv; + for (kv = self->kv; kv->key != NULL; kv++) { + switch (ft_arg_type(kv->val)) { + case 'o': + $del(&kv->val.v.o); + break; + case 's': + ft_free(kv->val.v.s); + break; + } + } + $del(&self->sibling); +} + +static fobjStr* +fobjErr_fobjRepr(VSelf) { + Self(fobjErr); + char buffer[256]; + ft_strbuf_t buf = ft_strbuf_init_stack(buffer, 256); + fobj_err_kv_t* kv = self->kv; + + ft_strbuf_catc(&buf, "$err("); + ft_strbuf_catc(&buf, self->type); + ft_strbuf_catc(&buf, ", "); + fobj_format_string(&buf, ft_cstr(self->message), "q"); + for (;kv->key; kv++) { + ft_strbuf_catc(&buf, ", ("); + ft_strbuf_catc(&buf, kv->key); + ft_strbuf_catc(&buf, ", "); + fobj_repr_arg(&buf, kv->val); + ft_strbuf_cat1(&buf, ')'); + } + ft_strbuf_cat1(&buf, ')'); + return fobj_strbuf_steal(&buf); +} + +static void +fobjErr_fobjFormat(VSelf, ft_strbuf_t *buf, const char *fmt) { + Self(fobjErr); + const char* c; + fobj_err_kv_t* kv = self->kv; + + if (fmt == NULL || fmt[0] == 0) { + // fmt = "$T: $M ($F@$f:$l)"; + ft_strbuf_catf(buf, "%s: %s (%s@%s:%d)", + self->type, self->message, + self->src.func, self->src.file, self->src.line); + return; + } + + for (c = fmt; *c; c++) { + if (*c != '$') { + ft_strbuf_cat1(buf, *c); + continue; + } + c++; + switch (*c) { + case 0: c--; break; + case '$': ft_strbuf_cat1(buf, '$'); break; + case 'T': ft_strbuf_catc(buf, self->type); break; + case 'M': ft_strbuf_catc(buf, self->message); break; + case 'F': ft_strbuf_catc(buf, self->src.func); break; + case 'f': 
ft_strbuf_catc(buf, self->src.file); break;
+        case 'l': ft_strbuf_catf(buf, "%d", self->src.line); break;
+        case 'K':
+            ft_strbuf_cat1(buf, '{');
+            for (kv = self->kv; kv->key; kv++) {
+                if (kv != self->kv)
+                    ft_strbuf_catc(buf, ", ");
+                fobj_format_string(buf, ft_cstr(kv->key), NULL);
+                ft_strbuf_catc(buf, ": ");
+                fobj_format_arg(buf, kv->val, NULL);
+            }
+            ft_strbuf_cat1(buf, '}');
+            break;
+        default:
+            ft_log(FT_ERROR, "Unknown error format character '%c'", *c);
+        }
+    }
+}
+
+/*
+ * Look up the value stored under `key` in an error's key-value list.
+ * Returns `dflt` when the error is NULL or the key is absent; when
+ * `found` is non-NULL, it is set to whether the key was present.
+ */
+ft_arg_t
+fobj_err_getkv(err_i err, const char *key, ft_arg_t dflt, bool *found) {
+    fobjErr*        oerr = (fobjErr*)(err.self);
+    fobj_err_kv_t*  kv;
+    if (oerr == NULL) return dflt;
+    ft_assert(fobj_real_klass_of(oerr) == fobjErr__kh());
+    kv = oerr->kv;
+    for (;kv->key != NULL; kv++) {
+        if (strcmp(kv->key, key) == 0) {
+            if (found) *found = true;
+            return kv->val;
+        }
+    }
+    /* bugfix: dereference the out-parameter instead of overwriting the
+     * local pointer (was `found = false;`, which left *found untouched
+     * on the not-found path while the found path sets *found = true) */
+    if (found) *found = false;
+    return dflt;
+}
+
+/* Render `fmt`, substituting `{ident[:format]}` placeholders with values
+ * from the key-value slice `kvs`; `{{` emits a literal '{'. */
+fobjStr*
+fobj_printkv(const char *fmt, ft_slc_fokv_t kvs) {
+    char        buf[128];
+    ft_strbuf_t out = ft_strbuf_init_stack(buf, 128);
+    size_t      i;
+    const char* cur;
+    char*       closebrace;
+    char*       formatdelim;
+    size_t      identlen;
+    size_t      formatlen;
+    char        ident[32];
+    char        format[32];
+
+    if (strchr(fmt, '{') == NULL || strchr(fmt, '}') == NULL) {
+        return fobj_str(fmt);
+    }
+
+    for (cur = fmt; *cur; cur++) {
+        if (*cur != '{') {
+            ft_strbuf_cat1(&out, *cur);
+            continue;
+        }
+        if (cur[1] == '{') {
+            ft_strbuf_cat1(&out, '{');
+            cur++;
+            continue;
+        }
+        cur++;
+        closebrace = strchr(cur, '}');
+        ft_assert(closebrace, "format string braces unbalanced");
+        formatdelim = memchr(cur, ':', closebrace - cur);
+        identlen = (formatdelim ?: closebrace) - cur;
+        ft_assert(identlen <= 31,
+                  "ident is too long in format \"%s\"", fmt);
+        ft_assert(formatdelim == NULL || closebrace - formatdelim <= 31,
+                  "format is too long in format \"%s\"", fmt);
+        strncpy(ident, cur, identlen);
+        ident[identlen] = 0;
+        formatlen = formatdelim ? 
closebrace - (formatdelim+1) : 0; + if (formatlen > 0) { + strncpy(format, formatdelim + 1, formatlen); + } + format[formatlen] = 0; + i = ft_search_fokv(kvs.ptr, kvs.len, ident, fobj_fokv_cmpc); + if (ft_unlikely(i >= kvs.len)) { + ft_log(FT_WARNING, "ident '%s' is not found (fmt \"%s\")", ident, fmt); + } else if (kvs.ptr[i].value == NULL) { + ft_strbuf_catc(&out, "NULL"); + } else if (!$ifdef(, fobjFormat, kvs.ptr[i].value, &out, format)) { + /* fallback to repr */ + ft_strbuf_cat(&out, fobj_getstr($repr(kvs.ptr[i].value))); + } + cur = closebrace; + } + + return fobj_strbuf_steal(&out); +} + +#ifndef WIN32 +static pthread_key_t fobj_AR_current_key = 0; +static void fobj_destroy_thread_AR(void *arg); +#endif + +/* Custom fobjBase implementation */ +fobj_klass_handle_t +fobjBase__kh(void) { + static volatile fobj_klass_handle_t hndl = 0; + fobj_klass_handle_t khandle = hndl; + ssize_t kls_size = sizeof(fobjBase); + if (khandle) return khandle; + { + fobj__method_impl_box_t methods[] = { + fobj__klass_decl_methods(fobjBase, fobj__map_params(kls__fobjBase)) + { 0, NULL } + }; + if (fobj_klass_init_impl(&hndl, kls_size, 0, methods, "fobjBase")) + return hndl; + } + khandle = hndl; + return khandle; +} + +fobj_klass_handle(fobjErr, mth(fobjRepr), varsized(kv)); +fobj_klass_handle(fobjStr, mth(fobjDispose), varsized(_buf)); +fobj_klass_handle(fobjInt); +fobj_klass_handle(fobjUInt); +fobj_klass_handle(fobjFloat); +fobj_klass_handle(fobjBool); + +void +fobj_init(void) { + ft_assert(fobj_global_state == FOBJ_RT_NOT_INITIALIZED); + +#ifndef WIN32 + { + int res = pthread_key_create(&fobj_AR_current_key, fobj_destroy_thread_AR); + if (res != 0) { + fprintf(stderr, "could not initialize autorelease thread key: %s", + strerror(res)); + abort(); + } + } +#endif + + fobj_global_state = FOBJ_RT_INITIALIZED; + + fobj__consume(fobjDispose__mh()); + fobj_klass_init(fobjBase); + fobj_klass_init(fobjErr); + fobj_klass_init(fobjStr); + fobj_klass_init(fobjInt); + 
fobj_klass_init(fobjUInt); + fobj_klass_init(fobjFloat); + fobj_klass_init(fobjBool); + + FOBJ_FUNC_ARP(); + + falseStr = $ref($S("false")); + trueStr = $ref($S("true")); +} + +void +fobj_freeze(void) { + fobj_global_state = FOBJ_RT_FROZEN; +} + +/* Without this function clang could commit initialization of klass without methods */ +volatile uint16_t fobj__FAKE__x; +void +fobj__consume(uint16_t _) { + fobj__FAKE__x += _; +} + +// AUTORELEASE POOL + +static void fobj_autorelease_pool_release_till(fobj_autorelease_pool **from, fobj_autorelease_pool *till); + +#ifndef __TINYC__ +static __thread fobj_autorelease_pool *fobj_AR_current = NULL; +#ifndef WIN32 +static __thread bool fobj_AR_current_set = false; +#endif +static inline fobj_autorelease_pool** +fobj_AR_current_ptr(void) { + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + +#ifndef WIN32 + if (!fobj_AR_current_set) + pthread_setspecific(fobj_AR_current_key, &fobj_AR_current); +#endif + return &fobj_AR_current; +} +#ifndef WIN32 +static void +fobj_destroy_thread_AR(void *arg) { + ft_assert(arg == &fobj_AR_current); + fobj_autorelease_pool_release_till(&fobj_AR_current, NULL); +} +#endif +#else +static fobj_autorelease_pool** +fobj_AR_current_ptr(void) { + fobj_autorelease_pool **current; + + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + + current = pthread_getspecific(fobj_AR_current_key); + if (current == NULL) { + current = ft_calloc(sizeof(fobj_autorelease_pool*)); + pthread_setspecific(fobj_AR_current_key, current); + } + return current; +} + +static void +fobj_destroy_thread_AR(void *arg) { + fobj_autorelease_pool **current = arg; + + fobj_autorelease_pool_release_till(current, NULL); + ft_free(current); +} +#endif + +fobj__autorelease_pool_ref +fobj_autorelease_pool_init(fobj_autorelease_pool *pool) { + fobj_autorelease_pool **parent = fobj_AR_current_ptr(); + pool->ref.parent = *parent; + pool->ref.root = parent; + pool->last = &pool->first; + pool->first.prev = NULL; + 
pool->first.cnt = 0; + *parent = pool; + return pool->ref; +} + +void +fobj_autorelease_pool_release(fobj_autorelease_pool *pool) { + fobj_autorelease_pool_release_till(pool->ref.root, pool->ref.parent); +} + +static void +fobj_autorelease_pool_release_till(fobj_autorelease_pool **from, fobj_autorelease_pool *till) { + fobj_autorelease_pool *current; + fobj_autorelease_chunk *chunk; + + while (*from != till) { + current = *from; + while (current->last != ¤t->first || current->last->cnt != 0) { + chunk = current->last; + if (chunk->cnt == 0) { + current->last = chunk->prev; + ft_free(chunk); + continue; + } + fobj_del(&chunk->refs[--chunk->cnt]); + } + ft_assert(*from == current); + *from = (*from)->ref.parent; + } +} + +static fobj_t +fobj_autorelease(fobj_t obj, fobj_autorelease_pool *pool) { + fobj_autorelease_chunk *chunk, *new_chunk; + + ft_assert(pool != NULL); + + chunk = pool->last; + if (chunk->cnt == FOBJ_AR_CHUNK_SIZE) { + new_chunk = ft_calloc(sizeof(fobj_autorelease_chunk)); + new_chunk->prev = chunk; + pool->last = chunk = new_chunk; + } + chunk->refs[chunk->cnt] = obj; + chunk->cnt++; + return obj; +} + +fobj_t +fobj_store_to_parent_pool(fobj_t obj, fobj_autorelease_pool *child_pool_or_null) { + return fobj_autorelease(obj, + (child_pool_or_null ?: *fobj_AR_current_ptr())->ref.parent); +} + +ft_register_source(); diff --git a/src/fu_util/impl/fo_impl.h b/src/fu_util/impl/fo_impl.h new file mode 100644 index 000000000..3e97c39a8 --- /dev/null +++ b/src/fu_util/impl/fo_impl.h @@ -0,0 +1,798 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#ifndef FOBJ_OBJ_PRIV_H +#define FOBJ_OBJ_PRIV_H + +#define Self_impl(Klass) \ + Klass * self ft_unused = Vself; fobj_klass_handle_t fobj__klassh ft_unused = fobj__nm_khandle(Klass)() + +typedef uint16_t fobj_klass_handle_t; +typedef uint16_t fobj_method_handle_t; + +/* Named argument handling tools */ +#if defined(__clang__) || defined(__clang_analyzer__) +#define 
fobj__push_ignore_initializer_overrides \ + _Pragma("clang diagnostic push"); \ + _Pragma("clang diagnostic ignored \"-Winitializer-overrides\"") +#define fobj__pop_ignore_initializer_overrides \ + _Pragma("clang diagnostic pop") +#else +#define fobj__push_ignore_initializer_overrides +#define fobj__pop_ignore_initializer_overrides +#endif + +#ifndef NDEBUG + +typedef struct { unsigned char is_set: 1; } *fobj__missing_argument_detector; +#define fobj__dumb_arg ((fobj__missing_argument_detector)(uintptr_t)1) +#define fobj__check_arg(name) ft_dbg_assert(fobj__nm_given(name) != NULL); + +#else + +typedef struct fobj__missing_argument_detector { +} fobj__missing_argument_detector; +#define fobj__dumb_arg {} +#define fobj__check_arg(name) + +#endif + +typedef struct { + fobj_method_handle_t meth; + void* impl; +} fobj__method_impl_box_t; + +/* param to tuple coversion */ + +#define fobj__map_params(...) \ + fm_eval(fm_foreach_comma(fobj__map_param, __VA_ARGS__)) +#define fobj__map_params_(...) \ + fm_foreach_comma(fobj__map_param, __VA_ARGS__) +#define fobj__map_param(param) \ + fm_cat(fobj__map_param_, param) +#define fobj__map_param_varsized(...) (varsized, __VA_ARGS__) +#define fobj__map_param_mth(...) (mth, __VA_ARGS__) +#define fobj__map_param_opt(...) (opt, __VA_ARGS__) +#define fobj__map_param_iface(...) 
(iface, __VA_ARGS__) +#define fobj__map_param_inherits(parent) (inherits, parent) + +/* Standard naming */ + +#define fobj__nm_mth(meth) mth__##meth +#define fobj__nm_mthdflt(meth) mth__##meth##__optional +#define fobj__nm_kls(klass) kls__##klass +#define fobj__nm_iface(iface) iface__##iface +#define fobj__nm_mhandle(meth) meth##__mh +#define fobj__nm_params_t(meth) meth##__params_t +#define fobj__nm_invoke(meth) fobj__invoke_##meth +#define fobj__nm_impl_t(meth) meth##__impl +#define fobj__nm_cb(meth) fetch_cb_##meth +#define fobj__nm_cb_t(meth) meth##__cb +#define fobj__nm_register(meth) fobj__register_##meth /* due to tcc bug, we can't use meth##__register */ +#define fobj__nm_wrap_decl(meth) fobj__wrap_decl_##meth +#define fobj__nm_meth_i(meth) meth##_i +#define fobj__nm_has(m) has_##m +#define fobj__nm_bind(m_or_i) bind_##m_or_i +#define fobj__nm_bindref(m_or_i) bindref_##m_or_i +#define fobj__nm_implements(m_or_i) implements_##m_or_i +#define fobj__nm_khandle(klass) klass##__kh +#define fobj__nm_klass_meth(klass, meth) klass##_##meth +#define fobj__nm_iface_i(iface) iface##_i +#define fobj__nm_given(param) param##__given +#define fobj__nm_kvalidate(m_or_i) fobj__klass_validate_##m_or_i + +/* Method definition */ +#define fobj__predefine_method(method) \ + ft_inline ft_gcc_const fobj_method_handle_t fobj__nm_mhandle(method)(void) + +#define fobj__define_method(meth) \ + fobj__method_declare_i(meth, fobj__nm_mth(meth)) +#define fobj__method_declare_i(meth, ...) \ + fobj__method_declare(meth, __VA_ARGS__) + +#define fobj__method_declare(meth, res, ...) 
\ + fobj__method_declare_impl(meth, \ + fobj__nm_mhandle(meth), \ + fobj__nm_params_t(meth), \ + fobj__nm_invoke(meth), \ + fobj__nm_impl_t(meth), \ + fobj__nm_cb(meth), \ + fobj__nm_cb_t(meth), \ + fobj__nm_register(meth), \ + fobj__nm_wrap_decl(meth), \ + fobj__nm_meth_i(meth), \ + fobj__nm_bind(meth), \ + fobj__nm_bindref(meth), \ + fobj__nm_implements(meth), \ + fobj__nm_kvalidate(meth), \ + fm_va_comma(__VA_ARGS__), \ + res, __VA_ARGS__) + +#define fobj__method_declare_impl(meth, handle, \ + params_t, \ + invoke_methparams, \ + impl_meth_t, \ + cb_meth, cb_meth_t, \ + _register_meth, wrap_decl, \ + meth_i, bind_meth, bindref_meth, implements_meth, \ + kvalidate, comma, res, ...) \ + \ + ft_inline ft_gcc_const fobj_method_handle_t handle(void) { \ + static volatile fobj_method_handle_t hndl = 0; \ + fobj_method_handle_t h = hndl; \ + if (h) return h; \ + fobj_method_init_impl(&hndl, fm_str(meth)); \ + return hndl; \ + } \ + \ + typedef res (* impl_meth_t)(fobj_t self comma fobj__mapArgs_toArgs(__VA_ARGS__)); \ + \ + typedef struct params_t { \ + fobj__missing_argument_detector fobj__dumb_first_param; \ + fobj__mapArgs_toFields(__VA_ARGS__) \ + } params_t; \ + \ + typedef struct cb_meth_t { \ + fobj_t self; \ + impl_meth_t impl; \ + } cb_meth_t; \ + \ + ft_inline cb_meth_t \ + cb_meth(fobj_t self, fobj_klass_handle_t parent) { \ + fobj__method_callback_t fnd = {NULL, NULL}; \ + if (self != NULL) { \ + fnd = fobj_method_search(self, handle(), parent); \ + } \ + return (cb_meth_t){fnd.self, fnd.impl}; \ + } \ + \ + ft_inline res \ + meth(fobj_t self comma fobj__mapArgs_toArgs(__VA_ARGS__)) { \ + cb_meth_t cb = cb_meth(self, fobj_self_klass); \ + ft_assert(cb.impl != NULL && cb.self != NULL); \ + ft_dbg_assert(!fobj__disposed(cb.self)); \ + return cb.impl(cb.self comma fobj__mapArgs_toNames(__VA_ARGS__)); \ + } \ + \ + ft_inline void \ + _register_meth(fobj_klass_handle_t klass, impl_meth_t cb) { \ + fobj_method_register_impl(klass, handle(), (void *)cb); \ + } \ + 
\ + ft_inline fobj__method_impl_box_t \ + wrap_decl(impl_meth_t cb) { \ + return (fobj__method_impl_box_t) { handle(), cb }; \ + } \ + \ + typedef union meth_i { \ + fobj_t self; \ + uintptr_t fobj__nm_has(meth); \ + } meth_i;\ + \ + ft_inline meth_i \ + bind_meth(fobj_t self) { \ + meth_i _iface = (meth_i){.self = self}; \ + ft_assert(cb_meth(self, fobj_self_klass).impl != NULL); \ + return _iface; \ + } \ + \ + ft_inline bool \ + implements_meth(fobj_t self, meth_i *ifacep) { \ + meth_i _iface = (meth_i){.self = self}; \ + cb_meth_t cb = cb_meth(self, fobj_self_klass); \ + if (ifacep != NULL) \ + *ifacep = cb.impl != NULL ? _iface : (meth_i){NULL}; \ + return cb.impl != NULL; \ + } \ + \ + ft_inline meth_i \ + bindref_meth(fobj_t self) { \ + meth_i _iface = bind_meth(self); \ + fobj_ref(_iface.self); \ + return _iface; \ + } \ + \ + ft_inline void \ + kvalidate(fobj_klass_handle_t khandle) { \ + ft_assert(fobj_klass_method_search(khandle, handle()) != NULL); \ + } \ + \ + ft_inline res \ + invoke_methparams(cb_meth_t cb, params_t params) { \ + ft_assert(cb.impl != NULL && cb.self != NULL); \ + ft_dbg_assert(!fobj__disposed(cb.self)); \ + fobj__assertArgs(__VA_ARGS__) \ + return cb.impl(cb.self comma fobj__mapArgs_toNamedParams(__VA_ARGS__)); \ + } \ + \ + fm__dumb_require_semicolon + +#define fobj__mapArgs_toArgs_do(x, y, ...) x y +#define fobj__mapArgs_toArgs(...) \ + fm_eval(fm_foreach_tuple_comma(fobj__mapArgs_toArgs_do, __VA_ARGS__)) + +#define fobj__mapArgs_toFields_do(x, y, ...) \ + x y; \ + fobj__missing_argument_detector fobj__nm_given(y); +#define fobj__mapArgs_toFields(...) \ + fm_eval(fm_foreach_tuple(fobj__mapArgs_toFields_do, __VA_ARGS__)) + +#define fobj__mapArgs_toNames_do(x, y, ...) y +#define fobj__mapArgs_toNames(...) \ + fm_eval(fm_foreach_tuple_comma(fobj__mapArgs_toNames_do, __VA_ARGS__)) + +#define fobj__mapArgs_toNamedParams_do(x, y, ...) params.y +#define fobj__mapArgs_toNamedParams(...) 
\ + fm_eval(fm_foreach_tuple_comma(fobj__mapArgs_toNamedParams_do, __VA_ARGS__)) + +#define fobj__assertArgs_do(x, y, ...) fobj__check_arg(params.y) +#define fobj__assertArgs(...) \ + fm_eval(fm_foreach_tuple(fobj__assertArgs_do, __VA_ARGS__)) + +#define fobj__special_void_method(meth) \ + \ + static ft_unused fobj_method_handle_t fobj__nm_mhandle(meth) (void) { \ + static volatile fobj_method_handle_t hndl = 0; \ + fobj_method_handle_t h = hndl; \ + if (h) return h; \ + fobj_method_init_impl(&hndl, fm_str(meth)); \ + return hndl; \ + } \ + \ + typedef void (* fobj__nm_impl_t(meth))(fobj_t self); \ + \ + ft_inline void \ + fobj__nm_register(meth)(fobj_klass_handle_t klass, fobj__nm_impl_t(meth) cb) { \ + fobj_method_register_impl(klass, fobj__nm_mhandle(meth)(), (void *)cb); \ + } \ + \ + ft_inline fobj__method_impl_box_t \ + fobj__nm_wrap_decl(meth)(fobj__nm_impl_t(meth) cb) { \ + return (fobj__method_impl_box_t) { fobj__nm_mhandle(meth)(), cb }; \ + } \ + \ + fm__dumb_require_semicolon + +#if defined(NDEBUG) || defined(__clang__) || defined(__TINYC__) +#define fobj__disposed fobj_disposed +#else +typedef struct fobj__header { +#define FOBJ__HEADER_MAGIC UINT64_C(0x1234567890abcdef) + uint64_t magic; + volatile uint32_t rc; + volatile uint16_t flags; + fobj_klass_handle_t klass; +} fobj__header_t; + +ft_inline bool +fobj__disposed(fobj_t self) { + fobj__header_t *h; + + ft_assert(self != NULL); + h = ((fobj__header_t*)self - 1); + assert(h->magic == FOBJ__HEADER_MAGIC); + return (__atomic_load_n(&h->flags, __ATOMIC_ACQUIRE) & 2) != 0; +} +#endif + +/* Klass declarations */ + +#define fobj__klass_declare(klass) \ + extern fobj_klass_handle_t fobj__nm_khandle(klass)(void) ft_gcc_const; \ + fm__dumb_require_semicolon + + +#define fobj__klass_handle(klass, ...) \ + fobj__klass_handle_i(klass, \ + fobj__map_params(fobj__nm_kls(klass)) \ + fm_va_comma(__VA_ARGS__) fobj__map_params(__VA_ARGS__)) +#define fobj__klass_handle_i(klass, ...) 
\ + fobj__klass_handle_impl(klass, __VA_ARGS__) +#define fobj__klass_handle_impl(klass, ...) \ + fobj_klass_handle_t fobj__nm_khandle(klass) (void) { \ + static volatile fobj_klass_handle_t hndl = 0; \ + fobj_klass_handle_t khandle = hndl; \ + fobj_klass_handle_t kparent = fobjBase__kh(); \ + ssize_t kls_size = sizeof(klass); \ + if (khandle) return khandle; \ + fm_eval_tuples_arg(fobj__klass_detect_size, klass, __VA_ARGS__) \ + { \ + fobj__method_impl_box_t methods[] = { \ + fobj__klass_decl_methods(klass, __VA_ARGS__) \ + { 0, NULL } \ + }; \ + if (fobj_klass_init_impl(&hndl, kls_size, kparent, methods, fm_str(klass))) \ + return hndl; \ + } \ + khandle = hndl; \ + fm_when(fm_isnt_empty(fobj__klass_has_iface(__VA_ARGS__))) ( \ + fobj__klass_check_iface(klass, __VA_ARGS__) \ + ) \ + return khandle; \ + } \ + fm__dumb_require_semicolon + +#define fobj__klass_detect_size_varsized_1(klass, fld, ...) \ + kls_size = -1-offsetof(klass,fld); +#define fobj__klass_detect_size_varsized_0(klass, ...) \ + kls_size = -1-sizeof(klass); +#define fobj__klass_detect_size_varsized(klass, ...) \ + fm_cat(fobj__klass_detect_size_varsized_, fm_va_01(__VA_ARGS__))(klass, __VA_ARGS__) +#define fobj__klass_detect_size_mth(...) +#define fobj__klass_detect_size_opt(...) +#define fobj__klass_detect_size_inherits(klass, parent) \ + kparent = fobj__nm_khandle(parent)(); +#define fobj__klass_detect_size_iface(...) +#define fobj__klass_detect_size(klass, tag, ...) \ + fobj__klass_detect_size_##tag (klass, __VA_ARGS__) + +#define fobj__method_init(meth) \ + fobj__consume(fobj__nm_mhandle(meth)()) +#define fobj__klass_init(klass) \ + fobj__consume(fobj__nm_khandle(klass)()) + +#define fobj__klass_decl_method(klass, meth, ...) \ + fobj__nm_wrap_decl(meth)(fobj__nm_klass_meth(klass, meth)), +#define fobj__klass_decl_method_loop(klass, ...) \ + fm_foreach_arg(fobj__klass_decl_method, klass, __VA_ARGS__) + +#define fobj__klass_decl_methods_mth(klass, ...) 
\ + fm_recurs(fobj__klass_decl_method_loop)(klass, __VA_ARGS__) +#define fobj__klass_decl_methods_opt(klass, ...) \ + fm_recurs(fobj__klass_decl_method_loop)(klass, __VA_ARGS__) +#define fobj__klass_decl_methods_varsized(...) +#define fobj__klass_decl_methods_inherits(klass, parent) +#define fobj__klass_decl_methods_iface(...) +#define fobj__klass_decl_methods_dispatch(klass, tag, ...) \ + fobj__klass_decl_methods_##tag(klass, __VA_ARGS__) +#define fobj__klass_decl_methods(klass, ...) \ + fm_eval(fm_foreach_tuple_arg(\ + fobj__klass_decl_methods_dispatch, klass, __VA_ARGS__)) + +#define fobj__klass_has_iface_varsized +#define fobj__klass_has_iface_mth +#define fobj__klass_has_iface_opt +#define fobj__klass_has_iface_inherits +#define fobj__klass_has_iface_iface 1 +#define fobj__klass_has_iface_impl(tag, ...) \ + fobj__klass_has_iface_##tag +#define fobj__klass_has_iface(...) \ + fm_eval_tuples(fobj__klass_has_iface_impl, __VA_ARGS__) + +#define fobj__klass_check_dispatch_varsized(...) +#define fobj__klass_check_dispatch_mth(...) +#define fobj__klass_check_dispatch_opt(...) +#define fobj__klass_check_dispatch_inherits(...) +#define fobj__klass_check_dispatch_iface(klass, ...) \ + fm_recurs(fobj__klass_check_dispatch_iface_i)(klass, __VA_ARGS__) +#define fobj__klass_check_dispatch_iface_i(klass, ...) \ + fm_foreach_arg(fobj__klass_check_one_iface, klass, __VA_ARGS__) +#define fobj__klass_check_one_iface(klass, iface) \ + fobj__nm_kvalidate(iface)(khandle); +#define fobj__klass_check_dispatch(klass, tag, ...) \ + fobj__klass_check_dispatch_##tag(klass, __VA_ARGS__) +#define fobj__klass_check_iface(klass, ...) \ + fm_eval_tuples_arg(fobj__klass_check_dispatch, klass, __VA_ARGS__) + +#define fobj__add_methods_loop(klass, ...) \ + fm_foreach_arg(fobj__add_methods_do, klass, __VA_ARGS__) +#define fobj__add_methods_do(klass, meth, ...) \ + fm_recurs(fobj__add_methods_do_)(klass, meth, ...) +#define fobj__add_methods_do_(klass, meth, ...) 
\ + fobj__nm_register(meth)(\ + fobj__nm_khandle(klass)(), \ + fobj__nm_klass_meth(klass, meth)); + +/* add methods after class declaration */ + +#define fobj__add_methods(klass, ...) do { \ + fobj_klass_handle_t khandle = fobj__nm_khandle(klass)(); \ + fm_eval(fobj__add_methods_loop(klass, __VA_ARGS__)) \ +} while (0) + +/* Instance creation */ +#define fobj__alloc(klass, ...) \ + fm_cat(fobj__alloc_, fm_va_01(__VA_ARGS__))(klass, fobj__nm_khandle(klass), -1, __VA_ARGS__) +#define fobj__alloc_sized(klass, size, ...) \ + fm_cat(fobj__alloc_, fm_va_01(__VA_ARGS__))(\ + klass, fobj__nm_khandle(klass), (size), __VA_ARGS__) +#define fobj__alloc_0(klass, khandle, size, ...) \ + ((klass *)fobj__allocate(khandle(), NULL, size)) +#define fobj__alloc_1(klass, khandle, size, ...) \ + ((klass *)fobj__allocate(khandle(), &(klass){__VA_ARGS__}, size)) + +/* Interface declaration */ + +#define fobj__iface_declare(iface) \ + fobj__iface_declare_i(iface, fobj__map_params(fobj__nm_iface(iface))) +#define fobj__iface_declare_i(iface, ...) \ + fobj__iface_declare_impl(iface, \ + fobj__nm_iface_i(iface), fobj__nm_bind(iface), \ + fobj__nm_bindref(iface), fobj__nm_implements(iface), \ + fobj__nm_kvalidate(iface), __VA_ARGS__) + +#define fobj__iface_declare_impl(iface, iface_i, \ + bind_iface, bindref_iface, implements_iface, \ + kvalidate, ...) \ + fobj__mapMethods_toHandlers(__VA_ARGS__) \ + typedef union iface_i { \ + fobj_t self; \ + fobj__mapMethods_toFields(__VA_ARGS__) \ + } iface_i; \ + \ + static ft_unused inline iface_i \ + bind_iface(fobj_t self) { \ + iface_i _iface = (iface_i){ .self = self }; \ + fobj__mapMethods_toSetters(__VA_ARGS__) \ + return _iface; \ + } \ + \ + static ft_unused inline bool \ + implements_iface(fobj_t self, iface_i *ifacep) { \ + iface_i _iface = (iface_i){ .self = self }; \ + bool all_ok = true; \ + fobj__mapMethods_toIfSetters(__VA_ARGS__) \ + if (ifacep != NULL) \ + *ifacep = all_ok ? 
_iface : (iface_i){NULL}; \ + return all_ok; \ + } \ + \ + static ft_unused inline iface_i \ + bindref_iface(fobj_t self) { \ + iface_i _iface = bind_iface(self); \ + fobj_ref(_iface.self); \ + return _iface; \ + } \ + \ + ft_inline void \ + kvalidate(fobj_klass_handle_t khandle) { \ + fobj__kvalidateMethods(__VA_ARGS__) \ + } \ + \ + fm__dumb_require_semicolon + +#ifndef NDEBUG +#define fobj_reduce(newifacetype, oldiface) ({ \ + if (0) { \ + __typeof(oldiface) _old_iface_ ft_unused = {NULL}; \ + fobj__nm_iface_i(newifacetype) _new_iface_ ft_unused = {NULL}; \ + fobj__mapMethods_toCopyChecks(newifacetype) \ + } \ + ((fobj__nm_iface_i(newifacetype)){.self = (oldiface).self}); \ +}) +#else +#define fobj_reduce(newifacetype, oldiface) \ + ((fobj__nm_iface_i(newifacetype)){.self = (oldiface).self}) +#endif + +#define fobj__mapMethods_toCopyChecks_do_opt(meth) +#define fobj__mapMethods_toCopyChecks_do_mth(meth) \ + _new_iface_.fobj__nm_has(meth) = _old_iface_.fobj__nm_has(meth); +#define fobj__mapMethods_toCopyChecks_loop(tag, ...) \ + fm_foreach(fobj__mapMethods_toCopyChecks_do_##tag, __VA_ARGS__) +#define fobj__mapMethods_toCopyChecks_do(tag, ...) \ + fm_recurs(fobj__mapMethods_toCopyChecks_loop)(tag, __VA_ARGS__) +#define fobj__mapMethods_toCopyChecks_i(...) \ + fm_foreach_tuple(fobj__mapMethods_toCopyChecks_do, __VA_ARGS__) +#define fobj__mapMethods_toCopyChecks_i1(iface, def) \ + fm_eval(fobj__mapMethods_toCopyChecks_i(def)) +#define fobj__mapMethods_toCopyChecks(iface) \ + fobj__mapMethods_toCopyChecks_i1(iface, \ + fm_expand fm_if(fobj__macroIsIface(iface), \ + (fobj__map_params(fobj__nm_iface(iface))), \ + ((mth, iface)))) + +#define fobj__macroIsIface(iface) \ + fm_is_empty(fm_eval(fobj__macroIsIface_i(fobj__nm_iface(iface)))) +#define fobj__macroIsIface_mth(...) +#define fobj__macroIsIface_opt(...) +#define fobj__macroIsIface_do(x) \ + fobj__macroIsIface_##x +#define fobj__macroIsIface_i(...) 
\ + fm_foreach(fobj__macroIsIface_do, __VA_ARGS__) + +#define fobj__mapMethods_toHandlers_do_do(m) \ + fobj__predefine_method(m); +#define fobj__mapMethods_toHandlers_loop(...) \ + fm_foreach(fobj__mapMethods_toHandlers_do_do, __VA_ARGS__) +#define fobj__mapMethods_toHandlers_do(tag, ...) \ + fm_recurs(fobj__mapMethods_toHandlers_loop)(__VA_ARGS__) +#define fobj__mapMethods_toHandlers(...) \ + fm_eval_tuples(fobj__mapMethods_toHandlers_do, __VA_ARGS__) + + +#define fobj__mapMethods_toFields_do_do(m) uintptr_t fobj__nm_has(m); +#define fobj__mapMethods_toFields_loop(...) \ + fm_foreach(fobj__mapMethods_toFields_do_do, __VA_ARGS__) +#define fobj__mapMethods_toFields_do(tag, ...) \ + fm_recurs(fobj__mapMethods_toFields_loop)(__VA_ARGS__) +#define fobj__mapMethods_toFields(...) \ + fm_eval_tuples(fobj__mapMethods_toFields_do, __VA_ARGS__) + +#define fobj__mapMethods_toSetters_do_opt(meth) +#define fobj__mapMethods_toSetters_do_mth(meth) \ + ft_assert(fobj_method_implements(self, fobj__nm_mhandle(meth)())); +#define fobj__mapMethods_toSetters_loop(tag, ...) \ + fm_foreach(fobj__mapMethods_toSetters_do_##tag, __VA_ARGS__) +#define fobj__mapMethods_toSetters_do(tag, ...) \ + fm_recurs(fobj__mapMethods_toSetters_loop)(tag, __VA_ARGS__) +#define fobj__mapMethods_toSetters(...) \ + fm_eval_tuples(fobj__mapMethods_toSetters_do, __VA_ARGS__) + +#define fobj__mapMethods_toIfSetters_do_opt(meth) +#define fobj__mapMethods_toIfSetters_do_mth(meth) \ + if (!fobj_method_implements(self, fobj__nm_mhandle(meth)())) all_ok = false; +#define fobj__mapMethods_toIfSetters_loop(tag, ...) \ + fm_foreach(fobj__mapMethods_toIfSetters_do_##tag, __VA_ARGS__) +#define fobj__mapMethods_toIfSetters_do(tag, ...) \ + fm_recurs(fobj__mapMethods_toIfSetters_loop)(tag, __VA_ARGS__) +#define fobj__mapMethods_toIfSetters(...) 
\ + fm_eval_tuples(fobj__mapMethods_toIfSetters_do, __VA_ARGS__) + +#define fobj__kvalidateMethods_do_opt(meth) +#define fobj__kvalidateMethods_do_mth(meth) \ + ft_assert(fobj_klass_method_search(khandle, fobj__nm_mhandle(meth)()) != NULL); +#define fobj__kvalidateMethods_loop(tag, ...) \ + fm_foreach(fobj__kvalidateMethods_do_##tag, __VA_ARGS__) +#define fobj__kvalidateMethods_do(tag, ...) \ + fm_recurs(fobj__kvalidateMethods_loop)(tag, __VA_ARGS__) +#define fobj__kvalidateMethods(...) \ + fm_eval_tuples(fobj__kvalidateMethods_do, __VA_ARGS__) + +/* Method invocation */ + +#define fobj_call(meth, self, ...) \ + fobj__nm_invoke(meth)(fobj__nm_cb(meth)(self, fobj_self_klass), fobj_pass_params(meth, __VA_ARGS__)) + +#define fobj_call_super(meth, _klassh, self, ...) \ + fobj__nm_invoke(meth)(fobj__nm_cb(meth)(self, _klassh), fobj_pass_params(meth, __VA_ARGS__)) + +#define fobj_iface_call(meth, iface, ...) \ + fobj_call(meth, (fobj_t)(iface).fobj__nm_has(meth), __VA_ARGS__) + +#define fobj_cb_fastcall(cb, ...) \ + (cb).impl((cb).self, __VA_ARGS__) + +#define fobj__implements(iface, self, ...) \ + (fobj__nm_implements(iface)(self, fm_if(fm_no_va(__VA_ARGS__), NULL, __VA_ARGS__))) + +#define fobj_iface_filled(meth, iface) \ + (fobj__nm_implements(meth)((fobj_t)(iface).fobj__nm_has(meth), NULL)) + +#define fobj_ifdef(assignment, meth, self, ...) \ + fobj__ifdef_impl(assignment, meth, (self), \ + fm_uniq(cb), fobj__nm_cb(meth), fobj__nm_cb_t(meth), \ + fobj__nm_invoke(meth), __VA_ARGS__) +#define fobj__ifdef_impl(assignment, meth, self_, cb, cb_meth, cb_meth_t, \ + invoke_meth__params, ...) ({ \ + cb_meth_t cb = cb_meth(self_, fobj_self_klass); \ + if (cb.impl != NULL) { \ + assignment invoke_meth__params(cb, fobj_pass_params(meth, __VA_ARGS__)); \ + } \ + cb.impl != NULL; \ + }) + +#define fobj_iface_ifdef(assignment, meth, iface, ...) 
\ + fobj_ifdef(assignment, meth, (fobj_t)(iface).fobj__nm_has(meth), __VA_ARGS__) + +/* Named params passing hazzles with optional and defaults */ + +#define fobj_pass_params(meth, ...) \ + fm_cat(fobj__pass_params_impl_, fm_no_va(__VA_ARGS__))( \ + meth, fobj__nm_params_t(meth), __VA_ARGS__) +#define fobj__pass_params_impl_1(meth, meth__params_t, ...) \ + ((meth__params_t){fobj__params_defaults(meth)}) +#ifndef __clang__ +#define fobj__pass_params_impl_0(meth, meth__params_t, ...) \ + ((meth__params_t){\ + fobj__params_defaults(meth), \ + fm_eval(fm_foreach_comma(fobj__pass_params_each, __VA_ARGS__)) \ + }) +#else +#define fobj__pass_params_impl_0(meth, meth__params_t, ...) \ + ({ \ + fobj__push_ignore_initializer_overrides; \ + meth__params_t _this_is_params = { \ + fobj__params_defaults(meth), \ + fm_eval(fm_foreach_comma(fobj__pass_params_each, __VA_ARGS__)) \ + }; \ + fobj__pop_ignore_initializer_overrides; \ + _this_is_params; \ + }) +#endif + +#define fobj__pass_params_each(param) \ + param, fobj__dumb_arg + +#define fobj__params_defaults(meth) \ + fobj__params_defaults_i(meth, fobj__nm_mthdflt(meth)()) \ + .fobj__dumb_first_param = fobj__dumb_arg +#define fobj__params_defaults_i(meth, ...) \ + fm_when(fm_is_tuple(fm_head(__VA_ARGS__))) ( \ + fobj__params_defaults_impl(__VA_ARGS__) \ + ) +#define fobj__params_defaults_impl(...) \ + fm_eval(fm_foreach_tuple(fobj__params_defaults_each, __VA_ARGS__)) +#define fobj__params_defaults_each(x, ...) 
\ + fm_when(fm_isnt_empty(__VA_ARGS__))( .x = __VA_ARGS__, )\ + .fobj__nm_given(x) = fobj__dumb_arg, + + +#define fobj_bind(iface, obj) fobj__nm_bind(iface)(obj) + +/* Declarations "private" implementation functions */ +extern bool fobj_method_init_impl(volatile fobj_method_handle_t *meth, + const char *name); +extern void fobj_method_register_impl(fobj_klass_handle_t klass, + fobj_method_handle_t meth, + void* impl); +extern bool fobj_klass_init_impl(volatile fobj_klass_handle_t *klass, + ssize_t size, + fobj_klass_handle_t parent, + fobj__method_impl_box_t *methods, + const char *name); +extern void* fobj__allocate(fobj_klass_handle_t klass, + void *init, + ssize_t size); + +/* helper function to consume value to disable compiler optimizations */ +extern void fobj__consume(uint16_t); + +typedef struct fobj__method_callback { + fobj_t self; + void* impl; +} fobj__method_callback_t; +extern fobj__method_callback_t fobj_method_search(fobj_t self, + fobj_method_handle_t meth, + fobj_klass_handle_t for_child_take_parent); + +extern bool fobj_method_implements(fobj_t self, + fobj_method_handle_t meth); + +extern void* fobj_klass_method_search(fobj_klass_handle_t klass, + fobj_method_handle_t meth); + +/* Variable set helpers */ + +#ifndef NDEBUG +#define fobj__set_impl(ptr, obj) do { \ + __typeof(&(**ptr)) fm_uniq(_validate_ptrptr_) ft_unused = NULL; \ + fobj_set((void**)(ptr), (obj)); \ +} while(0) +#define fobj__swap_impl(ptr, obj) ({ \ + __typeof(&(**ptr)) fm_uniq(_validate_ptrptr_) ft_unused = NULL; \ + fobj_swap((void**)(ptr), (obj)); \ +}) +#define fobj__del_impl(ptr) do { \ + __typeof(&(**ptr)) fm_uniq(_validate_ptrptr_) ft_unused = NULL; \ + fobj_del((void**)(ptr)); \ +} while (0) +#else +#define fobj__set_impl(ptr, obj) fobj_set((void**)(ptr), (obj)) +#define fobj__swap_impl(ptr, obj) fobj_swap((void**)(ptr), (obj)) +#define fobj__del_impl(ptr) fobj_del((void**)(ptr)) +#endif + +#define fobj__iref(iface) ((__typeof(iface)){.self=fobj_ref((iface).self)}) 
+#define fobj__iunref(iface) ((__typeof(iface)){.self=fobj_unref((iface).self)}) +#ifndef NDEBUG +#define fobj__iset(ptr, iface) do { \ + __typeof(*(ptr)) fm_uniq(_validate_ptr_) ft_unused = (__typeof(iface)){}; \ + fobj_set(&(ptr)->self, (iface).self); \ +} while (0) +#define fobj__iswap(ptr, iface) ({ \ + __typeof(*(ptr)) fm_uniq(_validate_ptr_) ft_unused = (__typeof(iface)){}; \ + (__typeof(iface)){.self=fobj_swap(&(ptr)->self, (iface).self)}; \ +}) +#else +#define fobj__iset(ptr, iface) \ + fobj_set(&(ptr)->self, (iface).self) +#define fobj__iswap(ptr, iface) \ + ((__typeof(iface)){.self=fobj_swap(&(ptr)->self, (iface).self)}) +#endif +#define fobj__idel(iface) fobj_del((void*)&(iface)->self) + +#define fobj__isave(iface) ((__typeof(iface)){.self = $save((iface).self)}) +#define fobj__iresult(iface) ((__typeof(iface)){.self = $result((iface).self)}) +#define fobj__ireturn(iface) return $iresult(iface) + +/* Autorelease pool handling */ + +#define FOBJ_AR_CHUNK_SIZE 14 +typedef struct fobj_autorelease_chunk fobj_autorelease_chunk; +struct fobj_autorelease_chunk { + fobj_autorelease_chunk *prev; + uint32_t cnt; + fobj_t refs[FOBJ_AR_CHUNK_SIZE]; +}; +typedef struct fobj__autorelease_pool_ref fobj__autorelease_pool_ref; +typedef struct fobj_autorelease_pool fobj_autorelease_pool; +struct fobj__autorelease_pool_ref { + fobj_autorelease_pool *parent; + fobj_autorelease_pool **root; +}; +struct fobj_autorelease_pool { + struct fobj__autorelease_pool_ref ref; + fobj_autorelease_chunk *last; + fobj_autorelease_chunk first; +}; + +extern fobj__autorelease_pool_ref fobj_autorelease_pool_init(fobj_autorelease_pool *pool); +extern void fobj_autorelease_pool_release(fobj_autorelease_pool *pool); +extern fobj_t fobj_store_to_parent_pool(fobj_t obj, + fobj_autorelease_pool *child_pool_or_null); + +#define FOBJ_ARP_POOL(name) \ + fobj_autorelease_pool __attribute__((cleanup(fobj_autorelease_pool_release))) \ + name = {fobj_autorelease_pool_init(&name), &name.first} + + 
+/******************************** + * ERROR + */ +typedef struct fobj_err_kv { + const char *key; + ft_arg_t val; +} fobj_err_kv_t; + +#define fobj__error_kind(err) \ + ft_inline const char* fobj_error_kind_##err(void) { return #err; } + +#define fobj__error_flag_key(key) \ + ft_inline fobj_err_kv_t fobj__err_mkkv_##key(void) { \ + return (fobj_err_kv_t){#key, ft_mka_z()}; \ + } \ + ft_inline bool fobj__err_getkv_##key(err_i err, bool *found) { \ + bool fnd; \ + fobj_err_getkv(err, #key, ft_mka_z(), &fnd)); \ + if (found) *found = fnd; \ + return fnd; \ + } + +#define fobj__error_int_key(key) \ + ft_inline fobj_err_kv_t fobj__err_mkkv_##key(int64_t v) { \ + return (fobj_err_kv_t){#key, ft_mka_i(v)}; \ + } \ + ft_inline int64_t fobj__err_getkv_##key(err_i err, bool *found) { \ + return ft_arg_i(fobj_err_getkv(err, #key, ft_mka_i(0), found)); \ + } + +#define fobj__error_uint_key(key) \ + ft_inline fobj_err_kv_t fobj__err_mkkv_##key(uint64_t v) { \ + return (fobj_err_kv_t){#key, ft_mka_u(v)}; \ + } \ + ft_inline uint64_t fobj__err_getkv_##key(err_i err, bool *found) { \ + return ft_arg_u(fobj_err_getkv(err, #key, ft_mka_u(0), found)); \ + } + +#define fobj__error_cstr_key(key) \ + ft_inline fobj_err_kv_t fobj__err_mkkv_##key(const char* v) { \ + return (fobj_err_kv_t){#key, ft_mka_s((char*)v)}; \ + } \ + ft_inline const char* fobj__err_getkv_##key(err_i err, bool *found) { \ + return ft_arg_s(fobj_err_getkv(err, #key, ft_mka_s(NULL), found)); \ + } + +#define fobj__error_float_key(key) \ + ft_inline fobj_err_kv_t fobj__err_mkkv_##key(double v) { \ + return (fobj_err_kv_t){#key, ft_mka_f(v)}; \ + } \ + ft_inline double fobj__err_getkv_##key(err_i err, bool *found) { \ + return ft_arg_f(fobj_err_getkv(err, #key, ft_mka_f(0), found)); \ + } + +#define fobj__error_bool_key(key) \ + ft_inline fobj_err_kv_t fobj__err_mkkv_##key(bool v) { \ + return (fobj_err_kv_t){#key, ft_mka_b(v)}; \ + } \ + ft_inline bool fobj__err_getkv_##key(err_i err, bool *found) { \ + return 
ft_arg_b(fobj_err_getkv(err, #key, ft_mka_b(false), found)); \ + } + +#define fobj__error_object_key(key) \ + ft_inline fobj_err_kv_t fobj__err_mkkv_##key(fobj_t v) { \ + return (fobj_err_kv_t){#key, ft_mka_o(v)}; \ + } \ + ft_inline fobj_t fobj__err_getkv_##key(err_i err, bool *found) { \ + return ft_arg_o(fobj_err_getkv(err, #key, ft_mka_o(NULL), found)); \ + } + +#endif diff --git a/src/fu_util/impl/fo_impl2.h b/src/fu_util/impl/fo_impl2.h new file mode 100644 index 000000000..093260ae0 --- /dev/null +++ b/src/fu_util/impl/fo_impl2.h @@ -0,0 +1,200 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#ifndef FOBJ_OBJ_PRIV2_H +#define FOBJ_OBJ_PRIV2_H + +#include +#include +#include + +ft_inline fobjStr* +fobj_str(const char* s) { + return fobj_newstr(ft_cstr(s), false); +} + +ft_inline fobjStr* +fobj_strbuf_steal(ft_strbuf_t *buf) { + fobjStr* str = fobj_newstr(ft_strbuf_ref(buf), buf->alloced); + *buf = (ft_strbuf_t){NULL}; + return str; +} + +ft_inline ft_str_t +fobj_getstr(fobjStr *str) { + return ft_str(str->ptr, str->len); +} + +ft_inline fobjStr* +fobj_strcatc(fobjStr *ostr, const char *str) { + return fobj_strcat(ostr, ft_cstr(str)); +} + +ft_inline fobjStr* +fobj_strcatc2(fobjStr *ostr, const char *str1, const char *str2) { + /* a bit lazy to do it in a fast way */ + return fobj_strcatf(ostr, "%s%s", str1, str2); +} + +ft_inline fobjStr* +fobj_stradd(fobjStr *ostr, fobjStr *other) { + return fobj_strcat(ostr, fobj_getstr(other)); +} + +ft_inline bool +fobj_streq(fobjStr* self, fobjStr *oth) { + return ft_streq(fobj_getstr(self), fobj_getstr(oth)); +} + +ft_inline FT_CMP_RES +fobj_strcmp(fobjStr* self, fobjStr *oth) { + return ft_strcmp(fobj_getstr(self), fobj_getstr(oth)); +} + +ft_inline bool +fobj_streq_str(fobjStr* self, ft_str_t oth) { + return ft_streq(fobj_getstr(self), oth); +} + +ft_inline FT_CMP_RES +fobj_strcmp_str(fobjStr* self, ft_str_t oth) { + return ft_strcmp(fobj_getstr(self), oth); +} + +ft_inline bool +fobj_streq_c(fobjStr* 
self, const char *oth) { + return ft_streqc(fobj_getstr(self), oth); +} + +ft_inline FT_CMP_RES +fobj_strcmp_c(fobjStr* self, const char *oth) { + return ft_strcmpc(fobj_getstr(self), oth); +} + +ft_inline fobjInt* +fobj_int(int64_t i) { + return $alloc(fobjInt, .i = i); +} + +ft_inline fobjUInt* +fobj_uint(uint64_t u) { + return $alloc(fobjUInt, .u = u); +} + +ft_inline fobjFloat* +fobj_float(double f) { + return $alloc(fobjFloat, .f = f); +} + +ft_inline fobjBool* +fobj_bool(bool b) { + return $alloc(fobjBool, .b = b); +} + +typedef struct fobjErr fobjErr; +struct fobjErr { + const char* type; + const char* message; + ft_source_position_t src; + fobjErr* sibling; /* sibling error */ + fobj_err_kv_t kv[]; +}; + +#define fobj_make_err(type, ...) \ + fm_cat(fobj_make_err_, fm_va_012(__VA_ARGS__))(type, __VA_ARGS__) +#define fobj_make_err_0(type, ...) ({ \ + fobj__make_err(fobj_error_kind_##type(), \ + ft__srcpos(), "Unspecified Error", NULL, 0); \ +}) +#define fobj_make_err_1(type, msg) ({ \ + fobj__make_err(fobj_error_kind_##type(), \ + ft__srcpos(), msg, NULL, 0); \ +}) +#define fobj_make_err_2(type, msg, ...) ({ \ + fobj_err_kv_t kvs[] = { \ + fobj__err_transform_kv(__VA_ARGS__) \ + }; \ + fobj__make_err(fobj_error_kind_##type(), \ + ft__srcpos(), msg, \ + kvs, ft_arrsz(kvs)); \ +}) + +#define fobj_make_syserr( ...) \ + fm_cat(fobj_make_syserr_, fm_va_01(__VA_ARGS__))(__VA_ARGS__) +#define fobj_make_syserr_0(...) ({ \ + fobj_err_kv_t kvs[] = { \ + {"errNo", ft_mka_i(errno)}, \ + {"errStr", ft_mka_s((char*)ft_strerror(errno))}, \ + }; \ + fobj__make_err(fobj_error_kind_SysErr(), \ + ft__srcpos(), "System Error: {errStr}", \ + kvs, ft_arrsz(kvs));\ +}) +#define fobj_make_syserr_1(msg, ...) 
({ \ + fobj_err_kv_t kvs[] = { \ + {"errNo", ft_mka_i(errno)}, \ + {"errStr", ft_mka_s((char*)ft_strerror(errno))}, \ + {"__msgSuffix", ft_mka_s((char*)": {errStr}")}, \ + fobj__err_transform_kv(__VA_ARGS__) \ + }; \ + fobj__make_err(fobj_error_kind_SysErr(), \ + ft__srcpos(), msg, \ + kvs, ft_arrsz(kvs));\ +}) + +extern err_i fobj__make_err(const char *type, + ft_source_position_t src, + const char *msg, + fobj_err_kv_t *kvs, + size_t kvn); + +#define fobj__err_transform_kv_do(key, ...) \ + fobj__err_mkkv_##key(__VA_ARGS__) +#define fobj__err_transform_kv(...) \ + fm_eval_tuples_comma(fobj__err_transform_kv_do, __VA_ARGS__) + +#define fobj__err_getkey(key, err, ...) \ + fobj__err_getkv_##key(err, fm_or_default(__VA_ARGS__)(NULL)) + +ft_inline int +getErrno(err_i err) { + return $errkey(errNo, err); +} + +ft_inline const char* +getErrnoStr(err_i err) { + return $errkey(errStr, err); +} + +ft_inline const char* +fobj_errtype(err_i err) { + fobjErr* self = (fobjErr*)(err.self); + ft_assert(fobj_real_klass_of(self) == fobjErr__kh()); \ + return self->type ? self->type : "RT"; +} + +ft_inline const char* +fobj_errmsg(err_i err) { + fobjErr* self = (fobjErr*)(err.self); + ft_assert(fobj_real_klass_of(self) == fobjErr__kh()); \ + return self->message ? self->message : "Unspecified Error"; +} + +ft_inline ft_source_position_t +fobj_errsrc(err_i err) { + fobjErr* self = (fobjErr*)(err.self); + ft_assert(fobj_real_klass_of(self) == fobjErr__kh()); \ + return self->src; +} + +#define fobj__printkv(fmt, ...) ({ \ + fobj_kv kvs[] = { \ + fobj__transform_fokv(__VA_ARGS__) \ + }; \ + fobj_printkv(fmt, ft_slc_fokv_make(kvs, ft_arrsz(kvs))); \ +}) + +#define fobj__transform_fokv_do(key, val) \ + { #key, val } +#define fobj__transform_fokv(...) 
\ + fm_eval_tuples_comma(fobj__transform_fokv_do, __VA_ARGS__) + +#endif // FOBJ_OBJ_PRIV2_H diff --git a/src/fu_util/impl/ft_impl.c b/src/fu_util/impl/ft_impl.c new file mode 100644 index 000000000..097171e86 --- /dev/null +++ b/src/fu_util/impl/ft_impl.c @@ -0,0 +1,593 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef HAVE_LIBBACKTRACE +#include +#else +#include +#endif + +#ifdef WIN32 +#define __thread __declspec(thread) +#else +#include +#endif + +#include + +#define FT_LOG_MAX_FILES (1<<12) + +static void * (*_ft_realloc) (void *, size_t) = realloc; +static void (*_ft_free) (void *) = free; + +void ft_set_allocators( + void *(*_realloc)(void *, size_t), + void (*_free)(void*)) { + _ft_realloc = _realloc ? _realloc : realloc; + _ft_free = _free ? _free : free; +} + +void* +ft_calloc(size_t size) { + void * res = ft_malloc(size); + ft_memzero(res, size); + return res; +} + +void* +ft_realloc(void *oldptr, size_t size) { + if (size) { + void *res = _ft_realloc(oldptr, size); + ft_assert(res, "ft_realloc failed: oldptr=%p size=%zd", oldptr, size); + return res; + } + if (oldptr) + _ft_free(oldptr); + return NULL; +} + +void* +ft_realloc_arr(void* ptr, size_t elem_sz, size_t old_elems, size_t new_elems) { + ptr = ft_realloc(ptr, ft_mul_size(elem_sz, new_elems)); + if (new_elems > old_elems) + ft_memzero((char*)ptr + elem_sz * old_elems, + elem_sz * (new_elems - old_elems)); + return ptr; +} + +#define MEMZERO_BLOCK 4096 +static const uint8_t zero[4096] = {0}; +void +ft_memzero(void *_ptr, size_t sz) { + uint8_t* ptr = _ptr; + uintptr_t ptri = (uintptr_t)ptr; + uintptr_t diff; + + if (ptri & (MEMZERO_BLOCK-1)) { + diff = MEMZERO_BLOCK - (ptri & (MEMZERO_BLOCK-1)); + if (diff > sz) + diff = sz; + memset(ptr, 0, diff); + ptr += diff; + sz -= diff; + } + + /* Do not dirty page if it clear */ + while (sz >= MEMZERO_BLOCK) { + if (memcmp(ptr, zero, MEMZERO_BLOCK) != 0) { 
+ memset(ptr, 0, MEMZERO_BLOCK); + } + ptr += MEMZERO_BLOCK; + sz -= MEMZERO_BLOCK; + } + + if (sz) + memset(ptr, 0, sz); +} + +/* String utils */ + +size_t +ft_strlcat(char *dest, const char* src, size_t dest_size) { + char* dest_null = memchr(dest, 0, dest_size); + size_t dest_len = dest_null ? dest_null - dest : dest_size; + ft_assert(dest_null, "destination has no zero byte"); + if (dest_len < dest_size-1) { + size_t cpy_len = dest_size - dest_len - 1; + strncpy(dest+dest_len, src, cpy_len); + dest[dest_len + cpy_len] = '\0'; + } + return dest_len + strlen(src); +} + +ft_str_t +ft_vasprintf(const char *fmt, va_list args) { + ft_strbuf_t buf = ft_strbuf_zero(); + bool err; + + ft_strbuf_vcatf_err(&buf, &err, fmt, args); + + if (err) { + ft_strbuf_free(&buf); + return ft_str(NULL, 0); + } + return ft_strbuf_steal(&buf); +} + +ft_str_t +ft_asprintf(const char *fmt, ...) { + ft_strbuf_t buf = ft_strbuf_zero(); + bool err; + va_list args; + + va_start(args, fmt); + ft_strbuf_vcatf_err(&buf, &err, fmt, args); + va_end(args); + + if (err) { + ft_strbuf_free(&buf); + return ft_str(NULL, 0); + } + return ft_strbuf_steal(&buf); +} + +bool +ft__strbuf_ensure(ft_strbuf_t *buf, size_t n) { + size_t new_len; + size_t new_cap; + bool overflowed = false; + ft_assert(!buf->fixed); + ft_assert(buf->cap < ft_add_size(buf->len, n)); + /* 4GB string limit */ + ft_assert(buf->len + n <= UINT32_MAX); + new_len = buf->len + n; + if (new_len > UINT32_MAX) { + new_len = UINT32_MAX; + overflowed = true; + } + new_cap = ft_nextpow2(new_len); + if (buf->alloced) + buf->ptr = ft_realloc(buf->ptr, new_cap); + else { + char* newbuf = ft_malloc(new_cap); + memcpy(newbuf, buf->ptr, (size_t)buf->len+1); + buf->ptr = newbuf; + } + buf->cap = new_cap-1; + buf->alloced = true; + buf->fixed = overflowed; + return !overflowed; +} + +extern bool +ft_strbuf_vcatf_err(ft_strbuf_t *buf, bool err[1], const char *fmt, va_list args) { + int save_errno = errno; + char localbuf[256] = ""; + char *str = NULL; 
+ size_t init_len = buf->len; + ssize_t len, need_len; + bool overflowed = false; + va_list argcpy; + + if (!ft_strbuf_may(buf)) + return false; + + err[0] = false; + + va_copy(argcpy, args); + need_len = vsnprintf(localbuf, ft_arrsz(localbuf), fmt, argcpy); + va_end(argcpy); + + if (need_len < 0) { + err[0] = true; + return true; + } + + if (need_len < ft_arrsz(localbuf)) { + return ft_strbuf_cat(buf, ft_str(localbuf, need_len)); + } + + for (;;) { + len = need_len; + if (!ft_strbuf_ensure(buf, len)) { + len = buf->cap - buf->len; + overflowed = true; + } + str = buf->ptr + init_len; + + errno = save_errno; + va_copy(argcpy, args); + need_len = vsnprintf(str, len+1, fmt, argcpy); + va_end(argcpy); + + if (need_len < 0) { + buf->ptr[buf->len] = '0'; + err[0] = true; + return true; + } + + if (need_len <= len) { + buf->len += need_len; + return ft_strbuf_may(buf); + } + if (overflowed) { + buf->len = buf->cap; + return false; + } + } +} + +bool +ft_strbuf_vcatf(ft_strbuf_t *buf, const char *fmt, va_list args) { + bool err = false; + bool may_continue = ft_strbuf_vcatf_err(buf, &err, fmt, args); + if (err) + ft_log(FT_ERROR, "error printing format '%s'", fmt); + return may_continue; +} + +bool +ft_strbuf_catf(ft_strbuf_t *buf, const char *fmt, ...) 
{ + bool err = false; + bool may_continue; + va_list args; + + va_start(args, fmt); + may_continue = ft_strbuf_vcatf_err(buf, &err, fmt, args); + va_end(args); + + if (err) + ft_log(FT_ERROR, "error printing format '%s'", fmt); + + return may_continue; +} + +/* Time */ +double +ft_time(void) { + struct timeval tv = {0, 0}; + ft_assyscall(gettimeofday(&tv, NULL)); + return (double)tv.tv_sec + (double)tv.tv_usec/1e6; +} + +/* Logging */ + +/* +static _Noreturn void +ft_quick_exit(const char* msg) { + write(STDERR_FILENO, msg, strlen(msg)); + abort(); +} +*/ + +static const char *ft_log_main_file = __FILE__; +const char* +ft__truncate_log_filename(const char *file) { + const char *me = ft_log_main_file; + const char *he = file; + for (;*he && *me && *he==*me;he++, me++) { +#ifndef WIN32 + if (*he == '/') + file = he+1; +#else + if (*he == '/' || *he == '\\') + file = he+1; +#endif + } + return file; +} + +static const char* +ft__base_log_filename(const char *file) { + const char *he = file; + for (;*he;he++) { +#ifndef WIN32 + if (*he == '/') + file = he+1; +#else + if (*he == '/' || *he == '\\') + file = he+1; +#endif + } + return file; +} + +#ifdef HAVE_LIBBACKTRACE +static struct backtrace_state * volatile ft_btstate = NULL; +static pthread_once_t ft_btstate_once = PTHREAD_ONCE_INIT; + +static void +ft_backtrace_init(void) { + __atomic_store_n(&ft_btstate, backtrace_create_state(NULL, 0, NULL, NULL), + __ATOMIC_RELEASE); +} + +static int +ft_backtrace_add(void *data, uintptr_t pc, + const char* filename, int lineno, + const char *function) { + struct ft_strbuf_t *buf = data; + ssize_t sz; + if (filename == NULL) + return 1; + return ft_strbuf_catf(buf, "\n%s:%-4d %s", + ft__truncate_log_filename(filename), lineno, function); +} +#endif + +static void ft_gnu_printf(4,0) +ft_default_log(enum FT_LOG_LEVEL level, ft_source_position_t srcpos, + const char* error, const char *fmt, va_list args) { +#define LOGMSG_SIZE (1<<12) + char buffer[LOGMSG_SIZE] = {0}; + 
ft_strbuf_t buf = ft_strbuf_init_fixed(buffer, LOGMSG_SIZE); + bool err; + double now; + + now = ft_time(); + ft_strbuf_catf(&buf, "%.3f %d [%s]", now, getpid(), ft_log_level_str(level)); + + if (level <= FT_DEBUG || level >= FT_ERROR) { + ft_strbuf_catf(&buf, " (%s@%s:%d)", srcpos.func, srcpos.file, srcpos.line); + } + + ft_strbuf_catc(&buf, " > "); + ft_strbuf_vcatf_err(&buf, &err, fmt, args); + if (err) { + ft_strbuf_catc(&buf, "<>"); + } + + if (error != NULL) { + ft_strbuf_catc(&buf, ": "); + ft_strbuf_catc(&buf, error); + } + + if (!ft_strbuf_may(&buf)) + goto done; + + if (level == FT_ERROR || level == FT_FATAL) { +#ifdef HAVE_LIBBACKTRACE + if (__atomic_load_n(&ft_btstate, __ATOMIC_ACQUIRE) == NULL) + pthread_once(&ft_btstate_once, ft_backtrace_init); + + backtrace_full(ft_btstate, 1, ft_backtrace_add, NULL, &buf); +#else + void *backtr[32] = {0}; + char **syms = NULL; + int i, n; + n = backtrace(backtr, 32); + syms = backtrace_symbols(backtr, n); + if (syms != NULL) { + for (i = 1; i < n; i++) { + ft_strbuf_cat1(&buf, '\n'); + ft_strbuf_catc(&buf, syms[i]); + } + free(syms); + } +#endif + } + +done: + if (!ft_strbuf_may(&buf)) { + buf.ptr[buf.len-3] = '.'; + buf.ptr[buf.len-2] = '.'; + buf.ptr[buf.len-1] = '.'; + } + + fprintf(stderr, "%s\n", buffer); +} + +static ft_gnu_printf(4,0) +void (*ft_log_hook)(enum FT_LOG_LEVEL, ft_source_position_t, const char*, const char *fmt, va_list args) = ft_default_log; + +void +ft__init_log(ft_log_hook_t hook, const char *file) { + ft_log_hook = hook == NULL ? ft_default_log : hook; + ft_log_main_file = file == NULL ? __FILE__ : file; +} + +void +ft__log(enum FT_LOG_LEVEL level, ft_source_position_t srcpos, + const char* error, const char *fmt, ...) { + va_list args; + srcpos.file = ft__truncate_log_filename(srcpos.file); + va_start(args, fmt); + ft_log_hook(level, srcpos, error, fmt, args); + va_end(args); +} + +extern _Noreturn void +ft__log_fatal(ft_source_position_t srcpos, const char* error, + const char *fmt, ...) 
{ + va_list args; + va_start(args, fmt); + ft_log_hook(FT_FATAL, srcpos, error, fmt, args); + va_end(args); + abort(); +} + +const char* +ft__strerror(int eno, char *buf, size_t len) { +#if !_GNU_SOURCE && (_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) + int saveno = errno; + int e = strerror_r(eno, buf, len); + if (e != 0) { + if (e == -1) { + e = errno; + } + if (e == EINVAL) { + snprintf(buf, len, "Wrong errno %d", eno); + } else if (e == ERANGE) { + snprintf(buf, len, "errno = %d has huge message", eno); + } + } + errno = saveno; + return buf; +#else + return strerror_r(eno, buf, len); +#endif +} + +#ifndef __TINYC__ +const char* +ft_strerror(int eno) { + static __thread char buf[256]; + return ft__strerror(eno, buf, sizeof(buf)); +} +#endif + +struct ft_log_and_assert_level ft_log_assert_levels = { + .log_level = FT_INFO, +#ifndef NDEBUG + .assert_level = FT_ASSERT_ALL, +#else + .assert_level = FT_ASSERT_RUNTIME, +#endif +}; + +typedef struct { + const char *file; + uint32_t next; + struct ft_log_and_assert_level local_levels; +} ft_log_file_registration; + +#define FT_LOG_FILES_HASH (FT_LOG_MAX_FILES/4) +static ft_log_file_registration ft_log_file_regs[FT_LOG_MAX_FILES] = {{0}}; +static uint32_t ft_log_file_reg_hash[FT_LOG_FILES_HASH] = {0}; +static uint32_t ft_log_file_n = 0; + +extern void +ft__register_source( + const char *file, + struct ft_log_and_assert_level **local_levels) { + ft_log_file_registration *reg; + uint32_t hash; + + ft_assert(ft_log_file_n < FT_LOG_MAX_FILES); + ft_dbg_assert(file != NULL); + + reg = &ft_log_file_regs[ft_log_file_n++]; + + reg->file = file; + reg->local_levels = ft_log_assert_levels; + + *local_levels = ®->local_levels; + + hash = ft_small_cstr_hash(ft__base_log_filename(reg->file)); + reg->next = ft_log_file_reg_hash[hash%FT_LOG_FILES_HASH]; + ft_log_file_reg_hash[hash%FT_LOG_FILES_HASH] = ft_log_file_n; +} + +static void +ft__log_level_reset(int what, int level) { + uint32_t i; + + if (what) + 
ft_log_assert_levels.log_level = level; + else + ft_log_assert_levels.assert_level = level; + + for (i = 0; i < ft_log_file_n; i++) { + if (what) + ft_log_file_regs[i].local_levels.log_level = level; + else + ft_log_file_regs[i].local_levels.assert_level = level; + } +} + +static void +ft__log_level_set(const char *file, int what, int level) { + ft_log_file_registration *reg; + uint32_t hash, i; + bool found = false; + size_t len = strlen(file); + + ft_dbg_assert(file != NULL); + + if (strcmp(file, "ALL") == 0) { + ft__log_level_reset(what, level); + return; + } + + hash = ft_small_cstr_hash(ft__base_log_filename(file)); + i = ft_log_file_reg_hash[hash%FT_LOG_FILES_HASH]; + while (i) { + size_t reglen; + reg = &ft_log_file_regs[i-1]; + ft_dbg_assert(reg->file != NULL); + reglen = strlen(reg->file); + if (reglen >= len && strcmp(reg->file + (reglen-len), file) == 0) { + if (what) + reg->local_levels.log_level = level; + else + reg->local_levels.assert_level = level; + found = true; + } + i = reg->next; + } + if (found) + return; + /* + * ooops... not found... pity... 
+ * ok, lets set global one, but without per-file setting + */ + if (what) + ft_log_assert_levels.log_level = level; + else + ft_log_assert_levels.assert_level = level; +} + +void +ft_log_level_reset(enum FT_LOG_LEVEL level) { + ft__log_level_reset(1, level); +} + +void +ft_assert_level_reset(enum FT_ASSERT_LEVEL level) { + ft__log_level_reset(0, level); +} + +void +ft_log_level_set(const char *file, enum FT_LOG_LEVEL level) { + ft__log_level_set(file, 1, level); +} + +void +ft_assert_level_set(const char *file, enum FT_ASSERT_LEVEL level) { + ft__log_level_set(file, 0, level); +} + +uint32_t +ft_rand(void) { + static volatile uint32_t rstate = 0xbeaf1234; + uint32_t rand = __atomic_fetch_add(&rstate, 0x11, __ATOMIC_RELAXED); + rand = ft_mix32(rand); + return rand; +} + +uint32_t +ft_small_cstr_hash(const char *key) { + unsigned char *str = (unsigned char *)key; + uint32_t h1 = 0x3b00; + uint32_t h2 = 0; + for (;str[0]; str++) { + h1 += str[0]; + h1 *= 9; + h2 += h1; + h2 = ft_rol32(h2, 7); + h2 *= 5; + } + h1 ^= h2; + h1 += ft_rol32(h2, 14); + h2 ^= h1; h2 += ft_ror32(h1, 6); + h1 ^= h2; h1 += ft_rol32(h2, 5); + h2 ^= h1; h2 += ft_ror32(h1, 8); + return h2; +} + diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h new file mode 100644 index 000000000..67d637e41 --- /dev/null +++ b/src/fu_util/impl/ft_impl.h @@ -0,0 +1,480 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 : */ +#ifndef FT_IMPL_H +#define FT_IMPL_H + +#ifdef __TINYC__ + +#if defined(__attribute__) +#undef __attribute__ +#define __attribute__ __attribute__ +#endif + +#include +#define __atomic_add_fetch(x, y, z) ft__atomic_add_fetch((x), (y), z, fm_uniq(y)) +#define ft__atomic_add_fetch(x, y_, z, y) ({ \ + __typeof(y_) y = y_; \ + __atomic_fetch_add((x), y, z) + y; \ +}) +#define __atomic_sub_fetch(x, y, z) ft__atomic_sub_fetch((x), (y), z, fm_uniq(y)) +#define ft__atomic_sub_fetch(x, y_, z, y) ({ \ + __typeof(y_) y = y_; \ + __atomic_fetch_sub((x), y, z) - y; \ +}) 
+#define __atomic_load_n(x, z) __atomic_load((x), z) +#define __atomic_store_n(x, y, z) __atomic_store((x), (y), z) + +#endif /* __TINYC__ */ + +/* Memory */ + +/* Logging */ + +static ft_unused inline const char* +ft_log_level_str(enum FT_LOG_LEVEL level) { + switch (level) { + case FT_DEBUG: return "DEBUG"; + case FT_LOG: return "LOG"; + case FT_INFO: return "INFO"; + case FT_WARNING: return "WARNING"; + case FT_ERROR: return "ERROR"; + case FT_FATAL: return "FATAL"; + case FT_OFF: return "OFF"; + case FT_TRACE: return "TRACE"; + default: return "UNKNOWN"; + } +} + +extern void ft__init_log(ft_log_hook_t hook, const char *file); + +struct ft_log_and_assert_level { + enum FT_LOG_LEVEL log_level; + enum FT_ASSERT_LEVEL assert_level; +}; + +extern struct ft_log_and_assert_level ft_log_assert_levels; + +/* this variable is duplicated in every source as static variable */ +static ft_unused +struct ft_log_and_assert_level *ft_local_lgas_levels = &ft_log_assert_levels; + +#define ft_will_log(level) (level >= ft_local_lgas_levels->log_level) +extern void ft__register_source(const char *file, + struct ft_log_and_assert_level **local_levels); + +#if defined(__GNUC__) || defined(__TINYC__) +#define ft__register_source_impl() \ + static __attribute__((constructor)) void \ + ft__register_source_(void) { \ + ft__register_source(__FILE__, &ft_local_lgas_levels); \ + } \ + fm__dumb_require_semicolon +#else +#define ft_register_source_impl() fm__dumb_require_semicolon +#endif + +#define COMPARE_FT_FATAL(x) x +#define ft__log_impl(level, error, fmt_or_msg, ...) \ + fm_if(fm_equal(level, FT_FATAL), \ + ft__log_fatal(ft__srcpos(), error, ft__log_fmt_msg(fmt_or_msg, __VA_ARGS__)), \ + ft__log_common(level, error, fmt_or_msg, __VA_ARGS__)) + +#define ft__log_common(level, error, fmt_or_msg, ...) 
do {\ + if (level >= FT_ERROR || ft_unlikely(ft_will_log(level))) \ + ft__log(level, ft__srcpos(), error, ft__log_fmt_msg(fmt_or_msg, __VA_ARGS__)); \ +} while(0) + +#define ft__log_fmt_msg(fmt, ...) \ + fm_tuple_expand(fm_if(fm_no_va(__VA_ARGS__), ("%s", fmt), (fmt, __VA_ARGS__))) + +extern ft_gnu_printf(4, 5) +void ft__log(enum FT_LOG_LEVEL level, ft_source_position_t srcpos, const char* error, const char *fmt, ...); +extern _Noreturn ft_gnu_printf(3, 4) +void ft__log_fatal(ft_source_position_t srcpos, const char* error, const char *fmt, ...); + +ft_inline bool ft__dbg_enabled(void) { + return ft_unlikely(ft_local_lgas_levels->assert_level >= FT_ASSERT_ALL); +} + +#define ft__dbg_assert(x, xs, ...) do { \ + if (ft__dbg_enabled() && ft_unlikely(!(x))) \ + ft__log_fatal(ft__srcpos(), xs, ft__assert_arg(__VA_ARGS__)); \ +} while(0) + +#define ft__assert(x, xs, ...) do { \ + if (ft_unlikely(!(x))) \ + ft__log_fatal(ft__srcpos(), xs, ft__assert_arg(__VA_ARGS__)); \ +} while(0) + +#define ft__assert_arg(...) \ + fm_if(fm_no_va(__VA_ARGS__), "Asserion failed", \ + ft__log_fmt_msg(__VA_ARGS__)) + +#define ft__assyscall(syscall, res, ...) ({ \ + __typeof(syscall) res = (syscall); \ + ft__assert(res >= 0, ft_strerror(errno), #syscall __VA_ARGS__); \ + res; \ + }) + +/* Comparison */ + +#define ft__max(a_, b_, a, b) ({ \ + __typeof(a_) a = (a_); \ + __typeof(b_) b = (b_); \ + a < b ? b : a ; \ + }) + +#define ft__min(a_, b_, a, b) ({ \ + __typeof(a_) a = (a_); \ + __typeof(b_) b = (b_); \ + a > b ? b : a ; \ + }) + +#define ft__cmp(a_, b_, a, b) ({ \ + __typeof(a_) a = (a_); \ + __typeof(b_) b = (b_); \ + a < b ? FT_CMP_LT : (a > b ? 
FT_CMP_GT : FT_CMP_EQ); \ + }) + +#define ft__swap(a_, b_, ap, bp, t) do { \ + __typeof(a_) ap = a_; \ + __typeof(a_) bp = b_; \ + __typeof(*ap) t = *ap; \ + *ap = *bp; \ + *bp = t; \ +} while (0) + +#if defined(__has_builtin) || defined(__clang__) +# if __has_builtin(__builtin_add_overflow) && __has_builtin(__builtin_mul_overflow) +# define ft__has_builtin_int_overflow +# endif +#elif __GNUC__ > 4 && !defined(__clang__) && !defined(__LCC__) +# define ft__has_builtin_int_overflow +#endif + +ft_inline size_t ft_add_size(size_t a, size_t b) { + size_t r; +#ifdef ft__has_builtin_int_overflow + if (ft_unlikely(__builtin_add_overflow(a, b, &r))) + ft_assert(r >= a && r >= b); +#else + r = a + b; + ft_assert(r >= a && r >= b); +#endif + return r; +} + +ft_inline size_t ft_mul_size(size_t a, size_t b) { + size_t r; +#ifdef ft__has_builtin_int_overflow + if (ft_unlikely(__builtin_mul_overflow(a, b, &r))) + ft_assert(r / a == b); +#else + r = a * b; + ft_assert(r / a == b); +#endif + return r; +} + +extern ft_gcc_malloc(ft_realloc, 1) void* ft_realloc(void* ptr, size_t new_sz); +extern ft_gcc_malloc(ft_realloc, 1) void* ft_calloc(size_t sz); + +// Some Numeric Utils + +ft_inline uint32_t +ft_rol32(uint32_t x, unsigned n) { + return n == 0 ? x : n >= 32 ? 0 : (x << n) | (x >> (32 - n)); +} + +ft_inline uint32_t +ft_ror32(uint32_t x, unsigned n) { + return n == 0 ? x : n >= 32 ? 
0 : (x << (32 - n)) | (x >> n); +} + +ft_inline size_t +ft_nextpow2(size_t sz) { + sz |= sz >> 1; + sz |= sz >> 2; + sz |= sz >> 4; + sz |= sz >> 8; + sz |= sz >> 16; +#if !defined(__SIZEOF_SIZE_T__) + if (sizeof(sz) > 4) + sz |= sz >> 32; +#elif __SIZEOF_SIZE_T__ > 4 + sz |= sz >> 32; +#endif + return ft_add_size(sz, 1); +} + +ft_inline uint32_t +ft_mix32(uint32_t h) +{ + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + return h; +} + +ft_inline uint32_t +ft_fast_randmod(uint32_t v, uint32_t mod) { + return (uint32_t)(((uint64_t)v * mod) >> 32); +} + +ft_inline uint32_t ft_randn(uint32_t mod) { + return ft_fast_randmod(ft_rand(), mod); +} + +/* ft_val_t */ +struct ft_arg { + union { + void *p; + char *s; + int64_t i; + uint64_t u; + double f; + bool b; +#ifdef FOBJ_OBJ_H + fobj_t o; +#endif + } v; + char t; +}; + +ft_inline ft_arg_t ft_mka_z(void) { return (ft_arg_t){.v={.u = 0}, .t='z'};} +ft_inline ft_arg_t ft_mka_p(void* p) { return (ft_arg_t){.v={.p = p}, .t='p'};} +ft_inline ft_arg_t ft_mka_s(char* s) { return (ft_arg_t){.v={.s = s}, .t='s'};} +ft_inline ft_arg_t ft_mka_i(int64_t i) { return (ft_arg_t){.v={.i = i}, .t='i'};} +ft_inline ft_arg_t ft_mka_u(uint64_t u) { return (ft_arg_t){.v={.u = u}, .t='u'};} +ft_inline ft_arg_t ft_mka_f(double f) { return (ft_arg_t){.v={.f = f}, .t='f'};} +ft_inline ft_arg_t ft_mka_b(bool b) { return (ft_arg_t){.v={.b = b}, .t='b'};} +#ifdef FOBJ_OBJ_H +ft_inline ft_arg_t ft_mka_o(fobj_t o) { return (ft_arg_t){.v={.o = o}, .t='o'};} +#endif + +ft_inline char ft_arg_type(ft_arg_t v) { return v.t; } + +ft_inline void ft_arg_z(ft_arg_t v) { ft_dbg_assert(v.t=='z'); } +ft_inline void* ft_arg_p(ft_arg_t v) { ft_dbg_assert(v.t=='p'); return v.v.p; } +ft_inline char* ft_arg_s(ft_arg_t v) { ft_dbg_assert(v.t=='s'); return v.v.s; } +ft_inline int64_t ft_arg_i(ft_arg_t v) { ft_dbg_assert(v.t=='i'); return v.v.i; } +ft_inline uint64_t ft_arg_u(ft_arg_t v) { ft_dbg_assert(v.t=='u'); return v.v.u; } 
+ft_inline double ft_arg_f(ft_arg_t v) { ft_dbg_assert(v.t=='f'); return v.v.f; } +ft_inline bool ft_arg_b(ft_arg_t v) { ft_dbg_assert(v.t=='b'); return v.v.b; } +#ifdef FOBJ_OBJ_H +ft_inline fobj_t ft_arg_o(ft_arg_t v) { ft_dbg_assert(v.t=='o'); return v.v.o; } +#endif + +/* slices and arrays */ + +ft_inline size_t +ft__index_unify(ssize_t at, size_t len) { + if (at >= 0) { + ft_assert(at < len); + return at; + } else { + ft_assert((size_t)(-at) <= len); + return (size_t)(len - (size_t)(-at)); + } +} + +ft_inline size_t +ft__slcindex_unify(ssize_t end, size_t len) { + if (end >= 0) { + ft_assert(end <= len); + return end; + } else if (end == FT_SLICE_END) { + return len; + } else { + ft_assert((size_t)(-end) <= len); + return (size_t)(len - (size_t)(-end)); + } +} + +// Bytes + +ft_inline void +ft_bytes_consume(ft_bytes_t *bytes, size_t cut) { + ft_dbg_assert(cut <= bytes->len); + bytes->ptr = bytes->ptr + cut; + bytes->len -= cut; +} + +ft_inline void +ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src) { + size_t len = ft_min(dest->len, src->len); + memmove(dest->ptr, src->ptr, len); + ft_bytes_consume(dest, len); + ft_bytes_consume(src, len); +} + +// String utils +ft_inline char * +ft_cstrdup(const char *str) { + return (char*)ft_strdup(ft_cstr(str)).ptr; +} + +ft_inline ft_str_t +ft_strdup(ft_str_t str) { + char *mem = ft_malloc(str.len + 1); + if (str.ptr != NULL) + memcpy(mem, str.ptr, str.len+1); + else + mem[0] = '0'; + str.ptr = mem; + return str; +} + +ft_inline bool +ft_streq(ft_str_t str, ft_str_t oth) { + return str.len == oth.len && strncmp(str.ptr, oth.ptr, str.len) == 0; +} + +ft_inline FT_CMP_RES +ft_strcmp(ft_str_t str, ft_str_t oth) { + size_t m = ft_min(str.len, oth.len); + return strncmp(str.ptr, oth.ptr, m) ?: ft_cmp(str.len, oth.len); +} + +ft_inline bool +ft_streqc(ft_str_t str, const char* oth) { + return ft_streq(str, ft_cstr(oth)); +} + +ft_inline FT_CMP_RES +ft_strcmpc(ft_str_t str, const char* oth) { + return ft_strcmp(str, 
ft_cstr(oth)); +} + +ft_inline ft_strbuf_t +ft_strbuf_zero(void) { + return (ft_strbuf_t){.ptr = "", .len = 0, .cap = 0}; +} + +ft_inline ft_strbuf_t +ft_strbuf_init_stack(char *buf, size_t capa) { + if (capa == 0) + return (ft_strbuf_t){.ptr = "", .len = 0, .cap = 0}; + ft_assert(capa <= UINT32_MAX); + buf[0] = '\0'; + return (ft_strbuf_t){.ptr = buf, .len = 0, .cap = capa-1}; +} + +ft_inline ft_strbuf_t +ft_strbuf_continue(char *buf, size_t capa) { + if (capa == 0) + return (ft_strbuf_t){.ptr = "", .len = 0, .cap = 0}; + ft_assert(capa <= UINT32_MAX); + buf[0] = '\0'; + return (ft_strbuf_t){.ptr = buf, .len = 0, .cap = capa-1}; +} + +ft_inline ft_strbuf_t +ft_strbuf_init_fixed(char *buf, size_t capa) { + ft_assert(capa > 0 && capa <= UINT32_MAX); + buf[0] = '\0'; + return (ft_strbuf_t){.ptr = buf, .len = 0, .cap = capa-1, .fixed = true}; +} + +ft_inline ft_strbuf_t +ft_strbuf_init_str(ft_str_t str) { + ft_assert(str.len <= UINT32_MAX); + return (ft_strbuf_t){.ptr = str.ptr, .len = str.len, .cap = str.len}; +} + +/* + * always allocates space for 1 zero ending byte. + * Returns false, if buffer reaches 4GB limit. 
+ */ +extern bool ft__strbuf_ensure(ft_strbuf_t *buf, size_t n); + +ft_inline bool +ft_strbuf_may(ft_strbuf_t *buf) { + return !buf->fixed || buf->len < buf->cap; +} + +ft_inline bool +ft_strbuf_ensure(ft_strbuf_t *buf, size_t n) { + if ((size_t)buf->cap < ft_add_size(buf->len, n)) { + if (buf->fixed) + return false; + return ft__strbuf_ensure(buf, n); + } + return true; +} + +ft_inline bool +ft_strbuf_cat(ft_strbuf_t *buf, ft_str_t s) { + if (!ft_strbuf_may(buf)) + return false; + if (s.len == 0) + return true; + if (!ft_strbuf_ensure(buf, s.len)) { + s.len = buf->cap - buf->len; + ft_assert(s.len > 0); + } + memmove(buf->ptr + buf->len, s.ptr, s.len); + buf->len += s.len; + buf->ptr[buf->len] = '\0'; + return ft_strbuf_may(buf); +} + +ft_inline bool +ft_strbuf_cat1(ft_strbuf_t *buf, char c) { + if (!ft_strbuf_may(buf)) + return false; + if (ft_strbuf_ensure(buf, 1)) { + buf->ptr[buf->len+0] = c; + buf->ptr[buf->len+1] = '\0'; + buf->len++; + } + return ft_strbuf_may(buf); +} + +ft_inline bool +ft_strbuf_cat2(ft_strbuf_t *buf, char c1, char c2) { + if (!ft_strbuf_may(buf)) + return false; + if (ft_strbuf_ensure(buf, 2)) { + buf->ptr[buf->len+0] = c1; + buf->ptr[buf->len+1] = c1; + buf->ptr[buf->len+2] = '\0'; + buf->len+=2; + } else { + buf->ptr[buf->len+0] = c1; + buf->ptr[buf->len+1] = '\0'; + buf->len++; + } + return ft_strbuf_may(buf); +} + +ft_inline bool +ft_strbuf_catc(ft_strbuf_t *buf, const char *s) { + return ft_strbuf_cat(buf, ft_cstr(s)); +} + +ft_inline void +ft_strbuf_free(ft_strbuf_t *buf) { + if (buf->alloced) { + ft_free(buf->ptr); + } + *buf = (ft_strbuf_t){NULL}; +} + +ft_inline ft_str_t +ft_strbuf_ref(ft_strbuf_t *buf) { + return ft_str(buf->ptr, buf->len); +} + +ft_inline ft_str_t +ft_strbuf_steal(ft_strbuf_t *buf) { + ft_str_t res = ft_str(buf->ptr, buf->len); + if (!buf->alloced) { + res = ft_strdup(res); + } + *buf = (ft_strbuf_t){NULL}; + return res; +} + +#endif diff --git a/src/fu_util/test/CMakeLists.txt 
b/src/fu_util/test/CMakeLists.txt new file mode 100644 index 000000000..06f86effc --- /dev/null +++ b/src/fu_util/test/CMakeLists.txt @@ -0,0 +1,28 @@ +cmake_minimum_required(VERSION 3.11) + +add_executable(array array.c) +target_link_libraries(array fu_utils) + +add_executable(bsearch bsearch.c) +target_link_libraries(bsearch fu_utils) + +add_executable(fuprintf fuprintf.c) +target_link_libraries(fuprintf fu_utils) + +add_executable(sort sort.c) +target_link_libraries(sort fu_utils) + +add_executable(sort_p sort_p.c) +target_link_libraries(sort_p fu_utils) + +add_executable(obj1 obj1.c) +target_link_libraries(obj1 fu_utils) + +enable_testing() + +add_test(NAME array COMMAND array) +add_test(NAME bsearch COMMAND bsearch) +add_test(NAME fuprintf COMMAND fuprintf) +add_test(NAME sort COMMAND sort) +add_test(NAME sort_p COMMAND sort_p) +add_test(NAME obj1 COMMAND obj1) diff --git a/src/fu_util/test/array.c b/src/fu_util/test/array.c new file mode 100644 index 000000000..aeb108e58 --- /dev/null +++ b/src/fu_util/test/array.c @@ -0,0 +1,135 @@ +#include +#include +#include +#define FU_MALLOC_RAW +#include "../ft_util.h" +#include "../ft_ss_examples.h" +#include "../ft_ar_examples.h" +#include + +static void +check_equal_fun(int *a, int *b, int len) +{ + for (len--; len >= 0; len--) + ft_assert(a[len] == b[len]); +} + +#define check_equal(_a_, ...) 
do { \ + int _cmp_[] = {__VA_ARGS__}; \ + int _len_ = ft_arrsz(_cmp_); \ + ft_assert((_a_)->len == _len_); \ + check_equal_fun((_a_)->ptr, _cmp_, _len_); \ +} while (0) + +static int wlkcnt = 0; +static FT_WALK_ACT +walk_simple(int *el) { + wlkcnt++; + if (*el > 8) + return FT_WALK_BREAK; + return FT_WALK_CONT; +} + +static FT_WALK_ACT +walk_del(int *el, ft_arg_t v) { + wlkcnt++; + if (*el == ft_arg_i(v)) + return FT_WALK_DEL; + return FT_WALK_CONT; +} + +static FT_WALK_ACT +walk_del2(int *el, ft_arg_t v) { + wlkcnt++; + if (*el == ft_arg_i(v)) + return FT_WALK_DEL_BREAK; + return FT_WALK_CONT; +} + + +int +main(void) { + ft_arr_int_t arr = ft_arr_init(); + int v, i; + ft_bsres_t bsres; + + ft_arr_int_push(&arr, 1); + check_equal(&arr, 1); + + ft_arr_int_push(&arr, 10); + ft_arr_int_push(&arr, 5); + ft_arr_int_push(&arr, 25); + ft_arr_int_push(&arr, 15); + ft_arr_int_push(&arr, 2); + + check_equal(&arr, 1, 10, 5, 25, 15, 2); + + ft_arr_int_resize(&arr, 1); + check_equal(&arr, 1); + ft_arr_int_append(&arr, ((int[]){10, 5, 25, 15, 2}), 5); + check_equal(&arr, 1, 10, 5, 25, 15, 2); + + ft_assert(ft_arr_int_at(&arr, 1) == 10); + ft_assert(ft_arr_int_at(&arr, 5) == 2); + + ft_shsort_int(ft_2ptrlen(arr), ft_int_cmp); + check_equal(&arr, 1, 2, 5, 10, 15, 25); + ft_assert(ft_arr_int_at(&arr, 2) == 5); + ft_assert(ft_arr_int_at(&arr, 5) == 25); + + ft_arr_int_set(&arr, 2, 8); + check_equal(&arr, 1, 2, 8, 10, 15, 25); + + bsres = ft_bsearch_int(ft_2ptrlen(arr), 14, ft_int_cmp); + ft_assert(bsres.ix == 4); + ft_assert(!bsres.eq); + bsres = ft_bsearch_int(ft_2ptrlen(arr), 2, ft_int_cmp); + ft_assert(bsres.ix == 1); + ft_assert(bsres.eq); + + i = ft_search_int(ft_2ptrlen(arr), 2, ft_int_cmp); + ft_assert(i == 1); + i = ft_search_int(ft_2ptrlen(arr), 3, ft_int_cmp); + ft_assert(i == 6); + + v = ft_arr_int_pop(&arr); + ft_assert(v == 25); + check_equal(&arr, 1, 2, 8, 10, 15); + + v = ft_arr_int_del_at(&arr, 1); + ft_assert(v == 2); + check_equal(&arr, 1, 8, 10, 15); + + 
ft_arr_int_insert_at(&arr, 3, 11); + check_equal(&arr, 1, 8, 10, 11, 15); + ft_arr_int_insert_at(&arr, 5, 20); + check_equal(&arr, 1, 8, 10, 11, 15, 20); + + ft_arr_int_del_slice(&arr, 3, 5); + check_equal(&arr, 1, 8, 10, 20); + + ft_arr_int_insert_n(&arr, 1, (int[]){7, 7, 9, 9}, 4); + check_equal(&arr, 1, 7, 7, 9, 9, 8, 10, 20); + + ft_arr_int_del_slice(&arr, -2, FT_SLICE_END); + check_equal(&arr, 1, 7, 7, 9, 9, 8); + + wlkcnt = 0; + ft_arr_int_walk(&arr, walk_simple); + ft_assert(wlkcnt == 4); + + wlkcnt = 0; + ft_arr_int_walk_r(&arr, walk_del, ft_mka_i(9)); + ft_assert(wlkcnt == 6); + check_equal(&arr, 1, 7, 7, 8); + + wlkcnt = 0; + ft_arr_int_walk_r(&arr, walk_del2, ft_mka_i(7)); + ft_assert(wlkcnt == 2); + check_equal(&arr, 1, 7, 8); + + ft_arr_int_free(&arr); + ft_assert(arr.len == 0); + ft_assert(arr.ptr == NULL); + +} diff --git a/src/fu_util/test/bsearch.c b/src/fu_util/test/bsearch.c new file mode 100644 index 000000000..9d33fd806 --- /dev/null +++ b/src/fu_util/test/bsearch.c @@ -0,0 +1,46 @@ +#include +#include +#include +#include +#include +#include + +int +main(void) { + int ex[] = {1, 3, 5, 7, 8, 9}; + ft_bsres_t bs; + + bs = ft_bsearch_int(ex, 6, 0, ft_int_cmp); + ft_assert(bs.ix == 0); + ft_assert(!bs.eq); + bs = ft_bsearch_int(ex, 6, 1, ft_int_cmp); + ft_assert(bs.ix == 0); + ft_assert(bs.eq); + bs = ft_bsearch_int(ex, 6, 2, ft_int_cmp); + ft_assert(bs.ix == 1); + ft_assert(!bs.eq); + bs = ft_bsearch_int(ex, 6, 3, ft_int_cmp); + ft_assert(bs.ix == 1); + ft_assert(bs.eq); + bs = ft_bsearch_int(ex, 6, 4, ft_int_cmp); + ft_assert(bs.ix == 2); + ft_assert(!bs.eq); + bs = ft_bsearch_int(ex, 6, 5, ft_int_cmp); + ft_assert(bs.ix == 2); + ft_assert(bs.eq); + bs = ft_bsearch_int(ex, 6, 6, ft_int_cmp); + ft_assert(bs.ix == 3); + ft_assert(!bs.eq); + bs = ft_bsearch_int(ex, 6, 7, ft_int_cmp); + ft_assert(bs.ix == 3); + ft_assert(bs.eq); + bs = ft_bsearch_int(ex, 6, 8, ft_int_cmp); + ft_assert(bs.ix == 4); + ft_assert(bs.eq); + bs = ft_bsearch_int(ex, 6, 9, 
ft_int_cmp); + ft_assert(bs.ix == 5); + ft_assert(bs.eq); + bs = ft_bsearch_int(ex, 6, 10, ft_int_cmp); + ft_assert(bs.ix == 6); + ft_assert(!bs.eq); +} diff --git a/src/fu_util/test/fuprintf.c b/src/fu_util/test/fuprintf.c new file mode 100644 index 000000000..73f680da7 --- /dev/null +++ b/src/fu_util/test/fuprintf.c @@ -0,0 +1,26 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#include +#include +#include + +int main(void) { + ft_str_t msg = ft_asprintf("asdf %d asdf\n", 123456); + const char *cmp = "asdf 123456 asdf\n"; + + ft_assert(msg.ptr != NULL); + ft_assert(msg.len == strlen(cmp)); + ft_assert(strcmp(msg.ptr, cmp) == 0); + + for (int i = 0; i < 10; i++) { + ft_str_t newmsg = ft_asprintf("%s%s", msg.ptr, msg.ptr); + ft_free((char*)msg.ptr); + msg = newmsg; + ft_assert(msg.ptr != NULL); + ft_assert(msg.len == strlen(cmp) * (1 << (i+1))); + } + + ft_free((char*)msg.ptr); + + return 0; +} + diff --git a/src/fu_util/test/obj1.c b/src/fu_util/test/obj1.c new file mode 100644 index 000000000..26a108b37 --- /dev/null +++ b/src/fu_util/test/obj1.c @@ -0,0 +1,293 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#include +#include + +#include +#include + +static int verbose = 0; +#define logf(...) 
ft_log(FT_DEBUG, __VA_ARGS__) + +#define mth__ioRead ssize_t, (void *, buf), (size_t, count) +#define mth__ioRead__optional() (count, 4) +#define mth__ioClose int +#define mth__ioStatus int +#define mth__fobjGetError err_i +fobj_method(ioRead); +fobj_method(ioClose); +fobj_method(ioStatus); +fobj_method(fobjGetError); + +#define iface__ioReader mth(ioRead) +#define iface__ioReadCloser iface__ioReader, mth(ioClose), opt(ioStatus) +#define iface__obj +fobj_iface(ioReadCloser); +fobj_iface(ioReader); +fobj_iface(obj); + + +#define kls__Klass0 mth(fobjDispose), \ + iface__ioReader, mth(fobjGetError) +#define kls__KlassA inherits(Klass0), \ + iface__ioReadCloser, \ + mth(ioStatus), iface(ioReadCloser, ioReader) + +fobj_klass(Klass0); +fobj_klass(KlassA); + +typedef struct Klass0 { + int x; +} Klass0; + + +typedef struct KlassA { + Klass0 p; + size_t offset; +} KlassA; + +static void +Klass0_fobjDispose(VSelf) { + Self(Klass0); + logf("{.x = %d}", self->x); +} + +static ssize_t +Klass0_ioRead(VSelf, void *buf, size_t count) { + Self(Klass0); + logf("{.x = %d}, .count = %zd", self->x, count); + self->x += 1; + return count; +} + +fobj_error_int_key(myx); +fobj_error_float_key(myy); + +static err_i +Klass0_fobjGetError(VSelf) { + Self(Klass0); + return $err(RT, "WTF ERROR {myx:05d} {myy:9.4f}", (myx, self->x), (myy, 100.001)); +} + +static int +KlassA_ioClose(VSelf) { + //Self(KlassA); + return 0; +} + +static ssize_t +KlassA_ioRead(VSelf, void *buf, size_t count) { + Self(KlassA); + logf("p{.offset = %zd}, .count = %zd", + self->offset, count); + self->offset += count; + $super(ioRead, self, buf, count); + return count; +} + +static int +KlassA_ioStatus(VSelf) { + Self(KlassA); + logf("{.offset = %zd}", self->offset); + return (int)self->offset; +} + +static void +KlassA_fobjDispose(VSelf) { + Self(KlassA); + logf("{.offset = %zd}", self->offset); +} + +fobj_klass_handle(KlassA, mth(fobjDispose), iface(obj)); +fobj_klass_handle(Klass0); + +int main(int argc, char** argv) 
{ + ft_init_log(NULL); + fobj_init(); + + FOBJ_FUNC_ARP(); + ft_assert(fobj__func_ar_pool.last != NULL); + + char b[1024]; + int benchmode = 0, benchcnt = 0; + int i; + + verbose = atoi(getenv("VERBOSE") ?: "0"); + benchcnt = atoi(getenv("BENCHCNT") ?: "0"); + benchmode = atoi(getenv("BENCHMODE") ?: "0"); + + if (verbose) { + //ft_log_level_reset(FT_LOG); + ft_log_level_set(__FILE__, FT_DEBUG); + } + + fobj_klass_init(Klass0); + fobj_klass_init(KlassA); + + fobj_freeze(); + + KlassA *a = $alloc(KlassA, .offset = 1, .p.x = 2); + logf("a=%s", fobjRepr(a)->ptr); + + logf("Before block 1 enter"); + { + FOBJ_BLOCK_ARP(); + KlassA *d; + fobj_t e; + logf("Before block 2 enter"); + { + FOBJ_BLOCK_ARP(); + KlassA *c = $alloc(KlassA, .p.x = 55555); + d = $alloc(KlassA, .p.x = 12345); + e = $alloc(KlassA, .p.x = 67890); + $unref($ref(c)); /* incref and store in current ARP */ + $save(d); /* store in outter ARP */ + $ref(e); /* explicit reference increment */ + logf("Before block 2 exits"); + } + logf("After block 2 exited"); + /* $set is needed only if variable is explicitely managed with $ref/$del */ + $set(&e, $alloc(KlassA, .p.x = 67891)); + $swap(&e, $alloc(KlassA, .p.x = 78912)); + $del(&e); /* explicit reference decrement */ + logf("Before block 1 exits"); + } + logf("After block 1 exited"); + + ioRead_i aird = bind_ioRead(a); + $i(ioRead, aird, b, 100); + $i(ioRead, aird, b); + // will fail in runtime with param.buf__given != NULL + //$i(ioRead, aird, .count = 100); + $i(ioRead, aird, .buf = b, .count = 100); + $i(ioRead, aird, .buf = b); + + ioReader_i ard = bind_ioReader(a); + $i(ioRead, ard, b, 100); + $i(ioRead, ard, .buf = b, .count = 100); + + ard = $bind(ioReader, a); + aird = $bind(ioRead, ard.self); + aird = $reduce(ioRead, ard); + ard = $reduce(ioReader, aird); + + ioReadCloser_i ardcl = bind_ioReadCloser(a); + ardcl = $reduce(ioReadCloser, ardcl); + ard = $reduce(ioReader, ardcl); + aird = $reduce(ioRead, ardcl); + + ioRead(a, b, 100); + $(ioRead, a, b, 
100); + $(ioRead, a, .buf = b, .count = 100); + + $(ioStatus, a); + + aird = (ioRead_i){NULL}; + ard = (ioReader_i){NULL}; + + err_i err = $err(RT, "ha"); + + ft_assert(!$implements(ioRead, err.self)); + ft_assert(!$implements(ioRead, err.self, &aird)); + ft_assert(!$ifilled(ioRead, aird)); + ft_assert(!$implements(ioReader, err.self)); + ft_assert(!$implements(ioReader, err.self, &ard)); + ft_assert(!$ifilled(ioRead, ard)); + + ft_assert($implements(ioRead, a)); + ft_assert($implements(ioRead, a, &aird)); + ft_assert($ifilled(ioRead, aird)); + ft_assert($implements(ioReader, a)); + ft_assert($implements(ioReader, a, &ard)); + ft_assert($ifilled(ioRead, ard)); + + i = ioStatus(a) - 1; + ft_assert($ifdef(,ioStatus, a)); + ft_assert(i != ioStatus(a)); + ft_assert($ifdef(i =, ioStatus, a)); + ft_assert(i == ioStatus(a)); + ft_assert(!$ifdef(,fobjFormat, a)); + + err = $(fobjGetError, a); + logf("Error: %s", $errmsg(err)); + logf("Error: %s", $itostr(err, NULL)->ptr); + logf("Error: %s", $itostr(err, "$T $M $K")->ptr); + ioRead(a, b, strlen($errmsg(err))); + $(ioRead, a, b, strlen($errmsg(err))); + $(ioRead, a, b, $(ioRead, a, b, $(ioStatus, a))); + logf("Error: %s", $errmsg($(fobjGetError, a))); + + errno = ENOENT; + err = $syserr(); + logf("Error: %s", $errmsg(err)); + logf("Error: %s", $irepr(err)->ptr); + errno = ENOENT; + err = $syserr("Opening file"); + logf("Error: %s", $errmsg(err)); + logf("Error: %s", $irepr(err)->ptr); + errno = ENOENT; + err = $syserr("Opening file {path}", (path, "folder/read.me")); + logf("Error: %s", $errmsg(err)); + logf("Error: %s", $irepr(err)->ptr); + logf("Errno: %d", getErrno(err)); + + Klass0 *k0 = $alloc(Klass0); + aird = bind_ioRead(k0); + ioRead__cb k0_ioRead = fetch_cb_ioRead(k0, fobj_self_klass); + for (i = 0; i < benchcnt; i++) { + switch (benchmode) { + case 0: ioRead(k0, b, 100); break; + case 1: $(ioRead, k0, b, 100); break; + case 2: $i(ioRead, aird, b, 100); break; + case 3: fobj_cb_fastcall(k0_ioRead, b, 100); break; + 
} + } + + $ref(a); + { fobj_t b = a; $del(&b); } + $(ioStatus, a); + + { + ioRead_i bird = {NULL}; + $iset(&bird, aird); + $iswap(&bird, aird); + $iref(bird); + $iunref(bird); + $idel(&bird); + } + + fobjStr *stra = $S("this is string a"); + fobjStr *strb = $S("this is b"); + + ft_assert(fobj_streq_c(stra, "this is string a")); + ft_assert(fobj_streq_c(strb, "this is b")); + + fobjStr *strc = fobj_strcatc(stra, "??????"); + fobjStr *strd = fobj_strcatc(strb, "!!"); + + ft_assert(fobj_streq_c(strc, "this is string a??????")); + ft_assert(fobj_streq_c(strd, "this is b!!")); + + fobjStr *stre = fobj_stradd(strc, strd); + + ft_assert(stre->len == strc->len + strd->len); + ft_assert(fobj_streq_c(stre, "this is string a??????this is b!!")); + + stre = fobj_sprintf("%s:%d", "hello", 1); + + ft_assert(fobj_streq_c(stre, "hello:1")); + + stre = fobj_strcatf(stre, "/%d/%s", 100, "goodbye"); + + ft_assert(fobj_streq_c(stre, "hello:1/100/goodbye")); + + fobjStr *strf = $fmt("Some {usual:8s} things cost > $${money:-8.4f}$$"); + ft_assert(fobj_streq_c(strf, "Some things cost > $$$$")); + strf = $fmt("Some {usual:8s} things cost > $${money:-8.4f}$$", + (usual, $S("scary")), (money, $F(12.48))); + ft_assert(fobj_streq_c(strf, "Some scary things cost > $$12.4800 $$"), + "String is '%s'", strf->ptr); + + logf("BEFORE EXIT"); +} + +ft_register_source(); diff --git a/src/fu_util/test/qsort/qsort.inc.c b/src/fu_util/test/qsort/qsort.inc.c new file mode 100644 index 000000000..c801ae52a --- /dev/null +++ b/src/fu_util/test/qsort/qsort.inc.c @@ -0,0 +1,249 @@ +/* Copyright (C) 1991-2018 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Written by Douglas C. Schmidt (schmidt@ics.uci.edu). + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. 
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +/* If you consider tuning this algorithm, you should consult first: + Engineering a sort function; Jon Bentley and M. Douglas McIlroy; + Software - Practice and Experience; Vol. 23 (11), 1249-1265, 1993. */ + +#include +#include +#include +#include + +/* Byte-wise swap two items of size SIZE. */ +#define SWAP(a, b, size) \ + do \ + { \ + size_t __size = (size); \ + char *__a = (a), *__b = (b); \ + do \ + { \ + char __tmp = *__a; \ + *__a++ = *__b; \ + *__b++ = __tmp; \ + } while (--__size > 0); \ + } while (0) + +/* Discontinue quicksort algorithm when partition gets below this size. + This particular magic number was chosen to work best on a Sun 4/260. */ +#define MAX_THRESH 4 + +/* Stack node declarations used to store unfulfilled partition obligations. */ +typedef struct + { + char *lo; + char *hi; + } stack_node; + +/* The next 4 #defines implement a very fast in-line stack abstraction. */ +/* The stack needs log (total_elements) entries (we could even subtract + log(MAX_THRESH)). Since total_elements has type size_t, we get as + upper bound for log (total_elements): + bits per byte (CHAR_BIT) * sizeof(size_t). */ +#define STACK_SIZE (CHAR_BIT * sizeof(size_t)) +#define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top)) +#define POP(low, high) ((void) (--top, (low = top->lo), (high = top->hi))) +#define STACK_NOT_EMPTY (stack < top) + + +/* Order size using quicksort. This implementation incorporates + four optimizations discussed in Sedgewick: + + 1. Non-recursive, using an explicit stack of pointer that store the + next array partition to sort. 
To save time, this maximum amount + of space required to store an array of SIZE_MAX is allocated on the + stack. Assuming a 32-bit (64 bit) integer for size_t, this needs + only 32 * sizeof(stack_node) == 256 bytes (for 64 bit: 1024 bytes). + Pretty cheap, actually. + + 2. Chose the pivot element using a median-of-three decision tree. + This reduces the probability of selecting a bad pivot value and + eliminates certain extraneous comparisons. + + 3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving + insertion sort to order the MAX_THRESH items within each partition. + This is a big win, since insertion sort is faster for small, mostly + sorted array segments. + + 4. The larger of the two sub-partitions is always pushed onto the + stack first, with the algorithm then concentrating on the + smaller partition. This *guarantees* no more than log (total_elems) + stack size is needed (actually O(1) in this case)! */ + +void +_quicksort (void *const pbase, size_t total_elems, size_t size, + int (*cmp)(const void *, const void*, void*), void *arg) +{ + char *base_ptr = (char *) pbase; + + const size_t max_thresh = MAX_THRESH * size; + + if (total_elems == 0) + /* Avoid lossage with unsigned arithmetic below. */ + return; + + if (total_elems > MAX_THRESH) + { + char *lo = base_ptr; + char *hi = &lo[size * (total_elems - 1)]; + stack_node stack[STACK_SIZE]; + stack_node *top = stack; + + PUSH (NULL, NULL); + + while (STACK_NOT_EMPTY) + { + char *left_ptr; + char *right_ptr; + + /* Select median value from among LO, MID, and HI. Rearrange + LO and HI so the three values are sorted. This lowers the + probability of picking a pathological pivot value and + skips a comparison for both the LEFT_PTR and RIGHT_PTR in + the while loops. 
*/ + + char *mid = lo + size * ((hi - lo) / size >> 1); + + if ((*cmp) ((void *) mid, (void *) lo, arg) < 0) + SWAP (mid, lo, size); + if ((*cmp) ((void *) hi, (void *) mid, arg) < 0) + SWAP (mid, hi, size); + else + goto jump_over; + if ((*cmp) ((void *) mid, (void *) lo, arg) < 0) + SWAP (mid, lo, size); + jump_over:; + + left_ptr = lo + size; + right_ptr = hi - size; + + /* Here's the famous ``collapse the walls'' section of quicksort. + Gotta like those tight inner loops! They are the main reason + that this algorithm runs much faster than others. */ + do + { + while ((*cmp) ((void *) left_ptr, (void *) mid, arg) < 0) + left_ptr += size; + + while ((*cmp) ((void *) mid, (void *) right_ptr, arg) < 0) + right_ptr -= size; + + if (left_ptr < right_ptr) + { + SWAP (left_ptr, right_ptr, size); + if (mid == left_ptr) + mid = right_ptr; + else if (mid == right_ptr) + mid = left_ptr; + left_ptr += size; + right_ptr -= size; + } + else if (left_ptr == right_ptr) + { + left_ptr += size; + right_ptr -= size; + break; + } + } + while (left_ptr <= right_ptr); + + /* Set up pointers for next iteration. First determine whether + left and right partitions are below the threshold size. If so, + ignore one or both. Otherwise, push the larger partition's + bounds on the stack and continue sorting the smaller one. */ + + if ((size_t) (right_ptr - lo) <= max_thresh) + { + if ((size_t) (hi - left_ptr) <= max_thresh) + /* Ignore both small partitions. */ + POP (lo, hi); + else + /* Ignore small left partition. */ + lo = left_ptr; + } + else if ((size_t) (hi - left_ptr) <= max_thresh) + /* Ignore small right partition. */ + hi = right_ptr; + else if ((right_ptr - lo) > (hi - left_ptr)) + { + /* Push larger left partition indices. */ + PUSH (lo, right_ptr); + lo = left_ptr; + } + else + { + /* Push larger right partition indices. 
*/ + PUSH (left_ptr, hi); + hi = right_ptr; + } + } + } + + /* Once the BASE_PTR array is partially sorted by quicksort the rest + is completely sorted using insertion sort, since this is efficient + for partitions below MAX_THRESH size. BASE_PTR points to the beginning + of the array to sort, and END_PTR points at the very last element in + the array (*not* one beyond it!). */ + +#define min(x, y) ((x) < (y) ? (x) : (y)) + + { + char *const end_ptr = &base_ptr[size * (total_elems - 1)]; + char *tmp_ptr = base_ptr; + char *thresh = min(end_ptr, base_ptr + max_thresh); + char *run_ptr; + + /* Find smallest element in first threshold and place it at the + array's beginning. This is the smallest array element, + and the operation speeds up insertion sort's inner loop. */ + + for (run_ptr = tmp_ptr + size; run_ptr <= thresh; run_ptr += size) + if ((*cmp) ((void *) run_ptr, (void *) tmp_ptr, arg) < 0) + tmp_ptr = run_ptr; + + if (tmp_ptr != base_ptr) + SWAP (tmp_ptr, base_ptr, size); + + /* Insertion sort, running from left-hand-side up to right-hand-side. 
*/ + + run_ptr = base_ptr + size; + while ((run_ptr += size) <= end_ptr) + { + tmp_ptr = run_ptr - size; + while ((*cmp) ((void *) run_ptr, (void *) tmp_ptr, arg) < 0) + tmp_ptr -= size; + + tmp_ptr += size; + if (tmp_ptr != run_ptr) + { + char *trav; + + trav = run_ptr + size; + while (--trav >= run_ptr) + { + char c = *trav; + char *hi, *lo; + + for (hi = lo = trav; (lo -= size) >= tmp_ptr; hi = lo) + *hi = *lo; + *hi = c; + } + } + } + } +} diff --git a/src/fu_util/test/qsort/qsort_pg.inc.c b/src/fu_util/test/qsort/qsort_pg.inc.c new file mode 100644 index 000000000..0d3221aff --- /dev/null +++ b/src/fu_util/test/qsort/qsort_pg.inc.c @@ -0,0 +1,21 @@ +/* + * qsort.c: standard quicksort algorithm + */ +#include + +#define ST_SORT pg_qsort +#define ST_ELEMENT_TYPE_VOID +#define ST_COMPARE_RUNTIME_POINTER +#define ST_SCOPE +#define ST_DECLARE +#define ST_DEFINE +#include "./sort_template.h" + +/* + * qsort comparator wrapper for strcmp. + */ +int +pg_qsort_strcmp(const void *a, const void *b) +{ + return strcmp(*(const char *const *) a, *(const char *const *) b); +} diff --git a/src/fu_util/test/qsort/sort_template.h b/src/fu_util/test/qsort/sort_template.h new file mode 100644 index 000000000..fd24f5962 --- /dev/null +++ b/src/fu_util/test/qsort/sort_template.h @@ -0,0 +1,436 @@ +/*------------------------------------------------------------------------- + * + * sort_template.h + * + * A template for a sort algorithm that supports varying degrees of + * specialization. + * + * Copyright (c) 2021-2022, PostgreSQL Global Development Group + * Portions Copyright (c) 1992-1994, Regents of the University of California + * + * Usage notes: + * + * To generate functions specialized for a type, the following parameter + * macros should be #define'd before this file is included. 
+ * + * - ST_SORT - the name of a sort function to be generated + * - ST_ELEMENT_TYPE - type of the referenced elements + * - ST_DECLARE - if defined the functions and types are declared + * - ST_DEFINE - if defined the functions and types are defined + * - ST_SCOPE - scope (e.g. extern, static inline) for functions + * - ST_CHECK_FOR_INTERRUPTS - if defined the sort is interruptible + * + * Instead of ST_ELEMENT_TYPE, ST_ELEMENT_TYPE_VOID can be defined. Then + * the generated functions will automatically gain an "element_size" + * parameter. This allows us to generate a traditional qsort function. + * + * One of the following macros must be defined, to show how to compare + * elements. The first two options are arbitrary expressions depending + * on whether an extra pass-through argument is desired, and the third + * option should be defined if the sort function should receive a + * function pointer at runtime. + * + * - ST_COMPARE(a, b) - a simple comparison expression + * - ST_COMPARE(a, b, arg) - variant that takes an extra argument + * - ST_COMPARE_RUNTIME_POINTER - sort function takes a function pointer + * + * To say that the comparator and therefore also sort function should + * receive an extra pass-through argument, specify the type of the + * argument. + * + * - ST_COMPARE_ARG_TYPE - type of extra argument + * + * The prototype of the generated sort function is: + * + * void ST_SORT(ST_ELEMENT_TYPE *data, size_t n, + * [size_t element_size,] + * [ST_SORT_compare_function compare,] + * [ST_COMPARE_ARG_TYPE *arg]); + * + * ST_SORT_compare_function is a function pointer of the following type: + * + * int (*)(const ST_ELEMENT_TYPE *a, const ST_ELEMENT_TYPE *b, + * [ST_COMPARE_ARG_TYPE *arg]) + * + * HISTORY + * + * Modifications from vanilla NetBSD source: + * - Add do ... while() macro fix + * - Remove __inline, _DIAGASSERTs, __P + * - Remove ill-considered "swap_cnt" switch to insertion sort, in favor + * of a simple check for presorted input. 
+ * - Take care to recurse on the smaller partition, to bound stack usage + * - Convert into a header that can generate specialized functions + * + * IDENTIFICATION + * src/include/lib/sort_template.h + * + *------------------------------------------------------------------------- + */ + +/* $NetBSD: qsort.c,v 1.13 2003/08/07 16:43:42 agc Exp $ */ + +/*- + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Qsort routine based on J. L. 
Bentley and M. D. McIlroy, + * "Engineering a sort function", + * Software--Practice and Experience 23 (1993) 1249-1265. + * + * We have modified their original by adding a check for already-sorted + * input, which seems to be a win per discussions on pgsql-hackers around + * 2006-03-21. + * + * Also, we recurse on the smaller partition and iterate on the larger one, + * which ensures we cannot recurse more than log(N) levels (since the + * partition recursed to is surely no more than half of the input). Bentley + * and McIlroy explicitly rejected doing this on the grounds that it's "not + * worth the effort", but we have seen crashes in the field due to stack + * overrun, so that judgment seems wrong. + */ +#define CppConcat(x, y) x##y +#define pg_noinline __attribute__((noinline)) +typedef uint8_t uint8; +#define Min(a, b) ((a) < (b) ? (a) : (b)) + +#define ST_MAKE_PREFIX(a) CppConcat(a,_) +#define ST_MAKE_NAME(a,b) ST_MAKE_NAME_(ST_MAKE_PREFIX(a),b) +#define ST_MAKE_NAME_(a,b) CppConcat(a,b) + +/* + * If the element type is void, we'll also need an element_size argument + * because we don't know the size. + */ +#ifdef ST_ELEMENT_TYPE_VOID +#define ST_ELEMENT_TYPE void +#define ST_SORT_PROTO_ELEMENT_SIZE , size_t element_size +#define ST_SORT_INVOKE_ELEMENT_SIZE , element_size +#else +#define ST_SORT_PROTO_ELEMENT_SIZE +#define ST_SORT_INVOKE_ELEMENT_SIZE +#endif + +/* + * If the user wants to be able to pass in compare functions at runtime, + * we'll need to make that an argument of the sort and med3 functions. + */ +#ifdef ST_COMPARE_RUNTIME_POINTER +/* + * The type of the comparator function pointer that ST_SORT will take, unless + * you've already declared a type name manually and want to use that instead of + * having a new one defined. 
+ */ +#ifndef ST_COMPARATOR_TYPE_NAME +#define ST_COMPARATOR_TYPE_NAME ST_MAKE_NAME(ST_SORT, compare_function) +#endif +#define ST_COMPARE compare +#ifndef ST_COMPARE_ARG_TYPE +#define ST_SORT_PROTO_COMPARE , ST_COMPARATOR_TYPE_NAME compare +#define ST_SORT_INVOKE_COMPARE , compare +#else +#define ST_SORT_PROTO_COMPARE , ST_COMPARATOR_TYPE_NAME compare +#define ST_SORT_INVOKE_COMPARE , compare +#endif +#else +#define ST_SORT_PROTO_COMPARE +#define ST_SORT_INVOKE_COMPARE +#endif + +/* + * If the user wants to use a compare function or expression that takes an + * extra argument, we'll need to make that an argument of the sort, compare and + * med3 functions. + */ +#ifdef ST_COMPARE_ARG_TYPE +#define ST_SORT_PROTO_ARG , ST_COMPARE_ARG_TYPE *arg +#define ST_SORT_INVOKE_ARG , arg +#else +#define ST_SORT_PROTO_ARG +#define ST_SORT_INVOKE_ARG +#endif + +#ifdef ST_DECLARE + +#ifdef ST_COMPARE_RUNTIME_POINTER +typedef int (*ST_COMPARATOR_TYPE_NAME) (const ST_ELEMENT_TYPE *, + const ST_ELEMENT_TYPE * ST_SORT_PROTO_ARG); +#endif + +/* Declare the sort function. Note optional arguments at end. */ +ST_SCOPE void ST_SORT(ST_ELEMENT_TYPE * first, size_t n + ST_SORT_PROTO_ELEMENT_SIZE + ST_SORT_PROTO_COMPARE + ST_SORT_PROTO_ARG); + +#endif + +#ifdef ST_DEFINE + +/* sort private helper functions */ +#define ST_MED3 ST_MAKE_NAME(ST_SORT, med3) +#define ST_SWAP ST_MAKE_NAME(ST_SORT, swap) +#define ST_SWAPN ST_MAKE_NAME(ST_SORT, swapn) + +/* Users expecting to run very large sorts may need them to be interruptible. */ +#ifdef ST_CHECK_FOR_INTERRUPTS +#define DO_CHECK_FOR_INTERRUPTS() CHECK_FOR_INTERRUPTS() +#else +#define DO_CHECK_FOR_INTERRUPTS() +#endif + +/* + * Create wrapper macros that know how to invoke compare, med3 and sort with + * the right arguments. 
+ */ +#ifdef ST_COMPARE_RUNTIME_POINTER +#define DO_COMPARE(a_, b_) ST_COMPARE((a_), (b_) ST_SORT_INVOKE_ARG) +#elif defined(ST_COMPARE_ARG_TYPE) +#define DO_COMPARE(a_, b_) ST_COMPARE((a_), (b_), arg) +#else +#define DO_COMPARE(a_, b_) ST_COMPARE((a_), (b_)) +#endif +#define DO_MED3(a_, b_, c_) \ + ST_MED3((a_), (b_), (c_) \ + ST_SORT_INVOKE_COMPARE \ + ST_SORT_INVOKE_ARG) +#define DO_SORT(a_, n_) \ + ST_SORT((a_), (n_) \ + ST_SORT_INVOKE_ELEMENT_SIZE \ + ST_SORT_INVOKE_COMPARE \ + ST_SORT_INVOKE_ARG) + +/* + * If we're working with void pointers, we'll use pointer arithmetic based on + * uint8, and use the runtime element_size to step through the array and swap + * elements. Otherwise we'll work with ST_ELEMENT_TYPE. + */ +#ifndef ST_ELEMENT_TYPE_VOID +#define ST_POINTER_TYPE ST_ELEMENT_TYPE +#define ST_POINTER_STEP 1 +#define DO_SWAPN(a_, b_, n_) ST_SWAPN((a_), (b_), (n_)) +#define DO_SWAP(a_, b_) ST_SWAP((a_), (b_)) +#else +#define ST_POINTER_TYPE uint8 +#define ST_POINTER_STEP element_size +#define DO_SWAPN(a_, b_, n_) ST_SWAPN((a_), (b_), (n_)) +#define DO_SWAP(a_, b_) DO_SWAPN((a_), (b_), element_size) +#endif + +/* + * Find the median of three values. Currently, performance seems to be best + * if the comparator is inlined here, but the med3 function is not inlined + * in the qsort function. + */ +static pg_noinline ST_ELEMENT_TYPE * +ST_MED3(ST_ELEMENT_TYPE * a, + ST_ELEMENT_TYPE * b, + ST_ELEMENT_TYPE * c + ST_SORT_PROTO_COMPARE + ST_SORT_PROTO_ARG) +{ + return DO_COMPARE(a, b) < 0 ? + (DO_COMPARE(b, c) < 0 ? b : (DO_COMPARE(a, c) < 0 ? c : a)) + : (DO_COMPARE(b, c) > 0 ? b : (DO_COMPARE(a, c) < 0 ? a : c)); +} + +static inline void +ST_SWAP(ST_POINTER_TYPE * a, ST_POINTER_TYPE * b) +{ + ST_POINTER_TYPE tmp = *a; + + *a = *b; + *b = tmp; +} + +static inline void +ST_SWAPN(ST_POINTER_TYPE * a, ST_POINTER_TYPE * b, size_t n) +{ + for (size_t i = 0; i < n; ++i) + ST_SWAP(&a[i], &b[i]); +} + +/* + * Sort an array. 
+ */ +ST_SCOPE void +ST_SORT(ST_ELEMENT_TYPE * data, size_t n + ST_SORT_PROTO_ELEMENT_SIZE + ST_SORT_PROTO_COMPARE + ST_SORT_PROTO_ARG) +{ + ST_POINTER_TYPE *a = (ST_POINTER_TYPE *) data, + *pa, + *pb, + *pc, + *pd, + *pl, + *pm, + *pn; + size_t d1, + d2; + int r, + presorted; + +loop: + DO_CHECK_FOR_INTERRUPTS(); + if (n < 7) + { + for (pm = a + ST_POINTER_STEP; pm < a + n * ST_POINTER_STEP; + pm += ST_POINTER_STEP) + for (pl = pm; pl > a && DO_COMPARE(pl - ST_POINTER_STEP, pl) > 0; + pl -= ST_POINTER_STEP) + DO_SWAP(pl, pl - ST_POINTER_STEP); + return; + } + presorted = 1; + for (pm = a + ST_POINTER_STEP; pm < a + n * ST_POINTER_STEP; + pm += ST_POINTER_STEP) + { + DO_CHECK_FOR_INTERRUPTS(); + if (DO_COMPARE(pm - ST_POINTER_STEP, pm) > 0) + { + presorted = 0; + break; + } + } + if (presorted) + return; + pm = a + (n / 2) * ST_POINTER_STEP; + if (n > 7) + { + pl = a; + pn = a + (n - 1) * ST_POINTER_STEP; + if (n > 40) + { + size_t d = (n / 8) * ST_POINTER_STEP; + + pl = DO_MED3(pl, pl + d, pl + 2 * d); + pm = DO_MED3(pm - d, pm, pm + d); + pn = DO_MED3(pn - 2 * d, pn - d, pn); + } + pm = DO_MED3(pl, pm, pn); + } + DO_SWAP(a, pm); + pa = pb = a + ST_POINTER_STEP; + pc = pd = a + (n - 1) * ST_POINTER_STEP; + for (;;) + { + while (pb <= pc && (r = DO_COMPARE(pb, a)) <= 0) + { + if (r == 0) + { + DO_SWAP(pa, pb); + pa += ST_POINTER_STEP; + } + pb += ST_POINTER_STEP; + DO_CHECK_FOR_INTERRUPTS(); + } + while (pb <= pc && (r = DO_COMPARE(pc, a)) >= 0) + { + if (r == 0) + { + DO_SWAP(pc, pd); + pd -= ST_POINTER_STEP; + } + pc -= ST_POINTER_STEP; + DO_CHECK_FOR_INTERRUPTS(); + } + if (pb > pc) + break; + DO_SWAP(pb, pc); + pb += ST_POINTER_STEP; + pc -= ST_POINTER_STEP; + } + pn = a + n * ST_POINTER_STEP; + d1 = Min(pa - a, pb - pa); + DO_SWAPN(a, pb - d1, d1); + d1 = Min(pd - pc, pn - pd - ST_POINTER_STEP); + DO_SWAPN(pb, pn - d1, d1); + d1 = pb - pa; + d2 = pd - pc; + if (d1 <= d2) + { + /* Recurse on left partition, then iterate on right partition */ + if (d1 > 
ST_POINTER_STEP) + DO_SORT(a, d1 / ST_POINTER_STEP); + if (d2 > ST_POINTER_STEP) + { + /* Iterate rather than recurse to save stack space */ + /* DO_SORT(pn - d2, d2 / ST_POINTER_STEP) */ + a = pn - d2; + n = d2 / ST_POINTER_STEP; + goto loop; + } + } + else + { + /* Recurse on right partition, then iterate on left partition */ + if (d2 > ST_POINTER_STEP) + DO_SORT(pn - d2, d2 / ST_POINTER_STEP); + if (d1 > ST_POINTER_STEP) + { + /* Iterate rather than recurse to save stack space */ + /* DO_SORT(a, d1 / ST_POINTER_STEP) */ + n = d1 / ST_POINTER_STEP; + goto loop; + } + } +} +#endif + +#undef DO_CHECK_FOR_INTERRUPTS +#undef DO_COMPARE +#undef DO_MED3 +#undef DO_SORT +#undef DO_SWAP +#undef DO_SWAPN +#undef ST_CHECK_FOR_INTERRUPTS +#undef ST_COMPARATOR_TYPE_NAME +#undef ST_COMPARE +#undef ST_COMPARE_ARG_TYPE +#undef ST_COMPARE_RUNTIME_POINTER +#undef ST_ELEMENT_TYPE +#undef ST_ELEMENT_TYPE_VOID +#undef ST_MAKE_NAME +#undef ST_MAKE_NAME_ +#undef ST_MAKE_PREFIX +#undef ST_MED3 +#undef ST_POINTER_STEP +#undef ST_POINTER_TYPE +#undef ST_SCOPE +#undef ST_SORT +#undef ST_SORT_INVOKE_ARG +#undef ST_SORT_INVOKE_COMPARE +#undef ST_SORT_INVOKE_ELEMENT_SIZE +#undef ST_SORT_PROTO_ARG +#undef ST_SORT_PROTO_COMPARE +#undef ST_SORT_PROTO_ELEMENT_SIZE +#undef ST_SWAP +#undef ST_SWAPN diff --git a/src/fu_util/test/sort.c b/src/fu_util/test/sort.c new file mode 100644 index 000000000..41472755c --- /dev/null +++ b/src/fu_util/test/sort.c @@ -0,0 +1,257 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=0 */ +#include +#include +#include +#include +#include +#include +#include "./qsort/qsort_pg.inc.c" + +static void +check_sorted(int *a, int len) { + for (; len > 1 ; len--) { + ft_assert(a[len-2] <= a[len-1]); + } +} + +static void +fill_ascending(int *a, int len) { + int i = 0; + for (; i < len ; i++) + a[i] = i; +} + +static void +fill_descending(int *a, int len) { + int i = 0; + for (; i < len ; i++) + a[i] = len - i; +} + +static void +fill_saw_1(int *a, int len) { + int i 
= 0; + for (; i < len/2 ; i++) + a[i] = i; + for (; i < len ; i++) + a[i] = len-i; +} + +static void +fill_saw_2(int *a, int len) { + int i = 0; + for (; i < len/2 ; i++) + a[i] = len-i; + for (; i < len ; i++) + a[i] = i; +} + +#define rand_init(len) \ + uint32_t r, rand = 0xdeadbeef ^ (uint32_t)len +#define rand_step do { \ + r = rand; \ + rand = rand * 0xcafedead + 0xbeef; \ + r = (r ^ (rand >> 16)) * 0x51235599; \ +} while(0) + + +static void +fill_flip(int *a, int len) { + int i = 0; + rand_init(len); + for (; i < len ; i++) + { + rand_step; + a[i] = r >> 31; + } +} + +static void +fill_several(int *a, int len) { + int i = 0; + rand_init(len); + for (; i < len ; i++) + { + rand_step; + a[i] = r >> 28; + } +} + +static void +fill_rand(int *a, int len) { + int i = 0; + uint32_t max = (uint32_t)len; + rand_init(len); + for (; i < len ; i++) + { + rand_step; + a[i] = ((uint64_t)r * max) >> 32; + } +} + +static void +fill_rand_div5(int *a, int len) { + int i = 0; + uint32_t max = (uint32_t)len / 5 + 1; + rand_init(len); + for (; i < len ; i++) + { + rand_step; + a[i] = ((uint64_t)r * max) >> 32; + } +} + +static void +fill_asc_swap_tail4(int *a, int len) { + int i = 0, j; + rand_init(len); + fill_ascending(a, len); + if (len < 8) + return; + for (; i < 4 ; i++) + { + rand_step; + j = ((uint64_t)r * (uint32_t)(len-4)) >> 32; + ft_swap(&a[len - 1 - i], &a[j]); + } +} + +static void +fill_asc_swap_head4(int *a, int len) { + int i = 0, j; + rand_init(len); + fill_ascending(a, len); + if (len < 8) + return; + for (; i < 4 ; i++) + { + rand_step; + j = ((uint64_t)r * (uint32_t)(len-5)) >> 32; + ft_swap(&a[i], &a[4+j]); + } +} + +static uint64_t ncomp = 0; + +static int +int_cmp_raw2(int a, int b) { + ncomp++; + return ft_cmp(a, b); +} + +static void ft_unused +sort_shell(int *a, int len) { + ft_shsort_int(a, len, int_cmp_raw2); +} + +static void ft_unused +sort_quick(int *a, int len) { + ft_qsort_int(a, len, int_cmp_raw2); +} + +static int +compare_int(const void *pa, 
const void *pb) { + int a = *(const int *)pa; + int b = *(const int *)pb; + ncomp++; + return a < b ? -1 : a > b; +} + +static void ft_unused +sort_qsort(int *a, int len) { + qsort(a, len, sizeof(len), compare_int); +} + +static void ft_unused +sort_qsort_pg(int *a, int len) { + pg_qsort(a, len, sizeof(len), compare_int); +} + +#define ST_SORT sort_qsort_pg2_ +#define ST_ELEMENT_TYPE int +#define ST_COMPARE(a, b) (ncomp++, *(a) < *(b) ? -1 : *(a) > *(b)) +#define ST_SCOPE static +#define ST_DECLARE +#define ST_DEFINE +#include "./qsort/sort_template.h" + +static void +sort_qsort_pg2(int *a, int len) { + sort_qsort_pg2_(a, len); +} + + +typedef void (*tfiller)(int *, int); +typedef void (*tsorter)(int *, int); + +static double +mtime(void) { + struct timespec ts = {0, 0}; + clock_gettime(CLOCK_MONOTONIC, &ts); + return (double)ts.tv_sec + (double)ts.tv_nsec/1e9; +} + + +int +main(void) { + int verbose = getenv("VERBOSE") ? atoi(getenv("VERBOSE")) : 0; + int ex[] = {8, 4, 0, 2, 6, 32, 12}; + ft_shsort_int(ex, 7, ft_int_cmp); + check_sorted(ex, 7); + + const char *sex[] = {"hi", "ho", "no", "yes", "obhs", "dump", "vamp"}; + ft_shsort_cstr(sex, 7, strcmp); + for (int i = 0; i < 6; i++) + ft_assert(strcmp(sex[i], sex[i+1]) < 0); + +#define VS(v) {v, #v} + struct { tfiller f; const char *name; } fillers[] = { + VS(fill_ascending), + VS(fill_descending), + VS(fill_rand), + VS(fill_rand_div5), + VS(fill_several), + VS(fill_flip), + VS(fill_saw_1), + VS(fill_saw_2), + VS(fill_asc_swap_head4), + VS(fill_asc_swap_tail4), + }; + struct { tsorter sorter; const char* name; } sorters[] = { + VS(sort_shell), + VS(sort_quick), + VS(sort_qsort), + VS(sort_qsort_pg), + VS(sort_qsort_pg2), + }; + int sizes[] = {1, 2, 3, 5, 10, 20, 50, 100, 500, 1000, 2000, 100000}; + int sz, fl, srt; + int *ar, *cp; + for (sz = 0; sz < ft_arrsz(sizes); sz++) { + if (verbose) + printf("sz: %d\n", sizes[sz]); + ar = calloc(sizeof(int), sizes[sz]); + cp = calloc(sizeof(int), sizes[sz]); + for(fl = 0; fl 
< ft_arrsz(fillers); fl++) { + fillers[fl].f(ar, sizes[sz]); + if (verbose) + printf(" filler: %s\n", fillers[fl].name); + for (srt = 0; srt < ft_arrsz(sorters); srt++) { + double tend, tstart; + ncomp = 0; + memcpy(cp, ar, sizeof(int)*sizes[sz]); + tstart = mtime(); + sorters[srt].sorter(cp, sizes[sz]); + tend = mtime(); + check_sorted(cp, sizes[sz]); + if (verbose) + printf(" %s: %.6f\tcmp: %llu\n", + sorters[srt].name, + tend - tstart, + (unsigned long long)ncomp); + } + } + free(ar); + free(cp); + } +} diff --git a/src/fu_util/test/sort_p.c b/src/fu_util/test/sort_p.c new file mode 100644 index 000000000..f4b10cb3c --- /dev/null +++ b/src/fu_util/test/sort_p.c @@ -0,0 +1,271 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=0 */ +#include +#include +#include +#include +#include +#include +#include "./qsort/qsort.inc.c" +#include "./qsort/qsort_pg.inc.c" + +static void +fill_ascending(int *a, int len) { + int i = 0; + for (; i < len ; i++) + a[i] = i; +} + +static void +fill_descending(int *a, int len) { + int i = 0; + for (; i < len ; i++) + a[i] = len - i; +} + +static void +fill_saw_1(int *a, int len) { + int i = 0; + for (; i < len/2 ; i++) + a[i] = i; + for (; i < len ; i++) + a[i] = len-i; +} + +static void +fill_saw_2(int *a, int len) { + int i = 0; + for (; i < len/2 ; i++) + a[i] = len-i; + for (; i < len ; i++) + a[i] = i; +} + +#define rand_init(len) \ + uint32_t r, rand = 0xdeadbeef ^ (uint32_t)len +#define rand_step do { \ + r = rand; \ + rand = rand * 0xcafedead + 0xbeef; \ + r = (r ^ (rand >> 16)) * 0x51235599; \ +} while(0) + +static void +fill_flip(int *a, int len) { + int i = 0; + rand_init(len); + for (; i < len ; i++) + { + rand_step; + a[i] = r >> 31; + } +} + +static void +fill_several(int *a, int len) { + int i = 0; + rand_init(len); + for (; i < len ; i++) + { + rand_step; + a[i] = r >> 28; + } +} + +static void +fill_rand(int *a, int len) { + int i = 0; + uint32_t max = (uint32_t)len; + rand_init(len); + for (; i < len ; i++) + 
{ + rand_step; + a[i] = ((uint64_t)r * max) >> 32; + } +} + +static void +fill_rand_div5(int *a, int len) { + int i = 0; + uint32_t max = (uint32_t)len / 5 + 1; + rand_init(len); + for (; i < len ; i++) + { + rand_step; + a[i] = ((uint64_t)r * max) >> 32; + } +} + +static void +fill_asc_swap_tail4(int *a, int len) { + int i = 0, j; + rand_init(len); + fill_ascending(a, len); + if (len < 16) + return; + for (; i < 8 ; i++) + { + rand_step; + j = ((uint64_t)r * (uint32_t)(len-9)) >> 32; + ft_swap(&a[len - 1 - i], &a[j]); + } +} + +static void +fill_asc_swap_head4(int *a, int len) { + int i = 0, j; + rand_init(len); + fill_ascending(a, len); + if (len < 16) + return; + for (; i < 8 ; i++) + { + rand_step; + j = ((uint64_t)r * (uint32_t)(len-9)) >> 32; + ft_swap(&a[i], &a[8+j]); + } +} + +static const char **ref = NULL; + +static void +fill_ref(int len) { + int i = 0, ignore ft_unused; + ref = calloc(sizeof(char*), len); + for (; i < len; i++) + { + ref[i] = ft_asprintf("%08x", i).ptr; + } +} + +static void +clear_ref(int len) { + int i = 0; + ref = calloc(sizeof(char*), len); + for (; i < len; i++) + free((void*)ref[i]); + free(ref); + ref = NULL; +} + +static uint64_t ncomp = 0; + +static FT_CMP_RES +compare_int_raw(int a, int b) { + ncomp++; + return ft_cstr_cmp(ref[a], ref[b]); +} + +static void ft_unused +sort_shell(int *a, int len) { + ft_shsort_int(a, len, compare_int_raw); +} + +static void ft_unused +sort_quick(int *a, int len) { + ft_qsort_int(a, len, compare_int_raw); +} + +static int +compare_int(const void *pa, const void *pb) { + int a = *(const int *)pa; + int b = *(const int *)pb; + ncomp++; + return strcmp(ref[a], ref[b]); +} + +static int +compare_int_v(const void *pa, const void *pb, void *p) { + int a = *(const int *)pa; + int b = *(const int *)pb; + ncomp++; + return strcmp(ref[a], ref[b]); +} + +static void ft_unused +sort_qsort(int *a, int len) { + qsort(a, len, sizeof(len), compare_int); +} + +static void ft_unused +sort_qsort_cpy(int *a, int 
len) { + _quicksort(a, len, sizeof(len), compare_int_v, NULL); +} + +static void ft_unused +sort_qsort_pg(int *a, int len) { + pg_qsort(a, len, sizeof(len), compare_int); +} + +static void +check_sorted(int *a, int len) { + for (; len > 1 ; len--) { + ft_assert(strcmp(ref[a[len-2]], ref[a[len-1]]) <= 0); + } +} + +typedef void (*tfiller)(int *, int); +typedef void (*tsorter)(int *, int); + +static double +mtime(void) { + struct timespec ts = {0, 0}; + clock_gettime(CLOCK_MONOTONIC, &ts); + return (double)ts.tv_sec + (double)ts.tv_nsec/1e9; +} + + +int +main(void) { + int verbose = getenv("VERBOSE") ? atoi(getenv("VERBOSE")) : 0; +#define VS(v) {v, #v} + struct { tfiller f; const char *name; } fillers[] = { + VS(fill_ascending), + VS(fill_descending), + VS(fill_rand), + VS(fill_rand_div5), + VS(fill_several), + VS(fill_flip), + VS(fill_saw_1), + VS(fill_saw_2), + VS(fill_asc_swap_head4), + VS(fill_asc_swap_tail4), + }; + struct { tsorter sorter; const char* name; } sorters[] = { + VS(sort_shell), + VS(sort_quick), + VS(sort_qsort), + VS(sort_qsort_cpy), + VS(sort_qsort_pg), + }; + int sizes[] = {1, 2, 3, 5, 10, 20, 50, 100, 500, 1000, 2000, 100000}; + int sz, fl, srt; + int *ar, *cp; + for (sz = 0; sz < ft_arrsz(sizes); sz++) { + if (verbose) + printf("sz: %d\n", sizes[sz]); + ar = calloc(sizeof(int), sizes[sz]); + cp = calloc(sizeof(int), sizes[sz]); + fill_ref(ft_max(sizes[sz]+2, 32)); + for(fl = 0; fl < ft_arrsz(fillers); fl++) { + fillers[fl].f(ar, sizes[sz]); + if (verbose) + printf(" filler: %s\n", fillers[fl].name); + for (srt = 0; srt < ft_arrsz(sorters); srt++) { + double tend, tstart; + ncomp = 0; + memcpy(cp, ar, sizeof(int)*sizes[sz]); + tstart = mtime(); + sorters[srt].sorter(cp, sizes[sz]); + tend = mtime(); + check_sorted(cp, sizes[sz]); + if (verbose) + printf(" %s: %.6f\tcmp: %llu\n", + sorters[srt].name, + tend - tstart, + (unsigned long long)ncomp); + } + } + free(ar); + free(cp); + clear_ref(ft_max(sizes[sz]+2, 32)); + } +} diff --git 
a/src/pg_probackup.c b/src/pg_probackup.c index 5bec84572..40f4edf48 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -295,6 +295,8 @@ main(int argc, char *argv[]) char *command = NULL; ProbackupSubcmd backup_subcmd = NO_CMD; + ft_init_log(elog_ft_log); + fobj_init(); PROGRAM_NAME_FULL = argv[0]; /* Check terminal presense and initialize ANSI escape codes for Windows */ diff --git a/src/pg_probackup.h b/src/pg_probackup.h index e862d1c20..2aec18a56 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -10,6 +10,7 @@ #ifndef PG_PROBACKUP_H #define PG_PROBACKUP_H +#include #include "postgres_fe.h" #include "libpq-fe.h" diff --git a/src/utils/logger.c b/src/utils/logger.c index 70bd5dcc4..b4da71721 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -48,7 +48,6 @@ void pg_log(eLogType type, const char *fmt,...) pg_attribute_printf(2, 3); static void elog_internal(int elevel, bool file_only, const char *message); static void elog_stderr(int elevel, const char *fmt, ...) 
pg_attribute_printf(2, 3); -static char *get_log_message(const char *fmt, va_list args) pg_attribute_printf(1, 0); /* Functions to work with log files */ static void open_logfile(FILE **file, const char *filename_format); @@ -64,6 +63,7 @@ static FILE *error_log_file = NULL; static bool exit_hook_registered = false; /* Logging of the current thread is in progress */ static bool loggin_in_progress = false; +static __thread bool thread_terminates = false; static pthread_mutex_t log_file_mutex = PTHREAD_MUTEX_INITIALIZER; @@ -235,7 +235,7 @@ write_elevel(FILE *stream, int elevel) static void exit_if_necessary(int elevel) { - if (elevel > WARNING && !in_cleanup) + if (elevel > WARNING && !in_cleanup && !thread_terminates) { if (loggin_in_progress) { @@ -249,6 +249,8 @@ exit_if_necessary(int elevel) /* If this is not the main thread then don't call exit() */ if (main_tid != pthread_self()) { + /* Notice this thread is quiting */ + thread_terminates = true; /* Interrupt other possible routines */ thread_interrupted = true; #ifdef WIN32 @@ -407,37 +409,6 @@ elog_stderr(int elevel, const char *fmt, ...) exit_if_necessary(elevel); } -/* - * Formats text data under the control of fmt and returns it in an allocated - * buffer. - */ -static char * -get_log_message(const char *fmt, va_list args) -{ - size_t len = 256; /* initial assumption about buffer size */ - - for (;;) - { - char *result; - size_t newlen; - va_list copy_args; - - result = (char *) pgut_malloc(len); - - /* Try to format the data */ - va_copy(copy_args, args); - newlen = pvsnprintf(result, len, fmt, copy_args); - va_end(copy_args); - - if (newlen < len) - return result; /* success */ - - /* Release buffer and loop around to try again with larger len. */ - pfree(result); - len = newlen; - } -} - /* * Logs to stderr or to log file and exit if ERROR. */ @@ -456,11 +427,11 @@ elog(int elevel, const char *fmt, ...) 
return; va_start(args, fmt); - message = get_log_message(fmt, args); + message = ft_vasprintf(fmt, args).ptr; va_end(args); elog_internal(elevel, false, message); - pfree(message); + ft_free(message); } /* @@ -480,11 +451,65 @@ elog_file(int elevel, const char *fmt, ...) return; va_start(args, fmt); - message = get_log_message(fmt, args); + message = ft_vasprintf(fmt, args).ptr; va_end(args); elog_internal(elevel, true, message); - pfree(message); + free(message); +} + +/* + * Wrapper for ft_log + */ +void +elog_ft_log(enum FT_LOG_LEVEL ft_level, ft_source_position_t srcpos, + const char* error, const char *fmt, va_list args) +{ +#define ERR_MAX 1024 +#define MSG_MAX 4096 + char message[MSG_MAX]; + size_t sz; + int elevel; + + switch (ft_level) + { + case FT_TRACE: + case FT_DEBUG: + elevel = VERBOSE; + break; + case FT_LOG: + elevel = LOG; + break; + case FT_INFO: + elevel = INFO; + break; + case FT_WARNING: + elevel = WARNING; + break; + case FT_FATAL: + case FT_ERROR: + elevel = ERROR; + break; + default: + elevel = WARNING; + } + + /* + * Do not log message if severity level is less than log_level. + * It is the little optimisation to put it here not in elog_internal(). + */ + if (elevel < logger_config.log_level_console && + elevel < logger_config.log_level_file && elevel < ERROR) + return; + + /* don't use ft_vasprintf since it could recurse to logging */ + sz = vsnprintf(message, MSG_MAX, fmt, args); + if (error != NULL && sz < MSG_MAX) { + ft_strlcat(message + sz, ": ", MSG_MAX-sz); + ft_strlcat(message + sz, error, MSG_MAX-sz); + } + + elog_internal(elevel, false, message); } /* @@ -526,11 +551,11 @@ pg_log(eLogType type, const char *fmt, ...) 
return; va_start(args, fmt); - message = get_log_message(fmt, args); + message = ft_vasprintf(fmt, args).ptr; va_end(args); elog_internal(elevel, false, message); - pfree(message); + ft_free(message); } /* diff --git a/src/utils/logger.h b/src/utils/logger.h index 6a7407e41..3412619bb 100644 --- a/src/utils/logger.h +++ b/src/utils/logger.h @@ -49,6 +49,9 @@ extern LoggerConfig logger_config; #undef elog extern void elog(int elevel, const char *fmt, ...) pg_attribute_printf(2, 3); extern void elog_file(int elevel, const char *fmt, ...) pg_attribute_printf(2, 3); +extern void elog_ft_log(enum FT_LOG_LEVEL, ft_source_position_t srcpos, + const char* error, const char *fmt, va_list args) + pg_attribute_printf(4, 0); extern void init_logger(const char *root_path, LoggerConfig *config); extern void init_console(void); From fb802646b7ee127840c9c2f8224ebd84e73d1fc6 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 5 Jul 2022 10:46:56 +0300 Subject: [PATCH 023/339] pio interface --- src/pg_probackup.c | 2 + src/utils/file.c | 1574 ++++++++++++++++++++++++++++++++++++++++++-- src/utils/file.h | 136 +++- 3 files changed, 1650 insertions(+), 62 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 40f4edf48..ff5ab85d3 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -297,6 +297,8 @@ main(int argc, char *argv[]) ft_init_log(elog_ft_log); fobj_init(); + init_pio_objects(); + PROGRAM_NAME_FULL = argv[0]; /* Check terminal presense and initialize ANSI escape codes for Windows */ diff --git a/src/utils/file.c b/src/utils/file.c index 484898d19..3d74c37f9 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -9,7 +9,6 @@ #include "storage/checksum.h" #define PRINTF_BUF_SIZE 1024 -#define FILE_PERMISSIONS 0600 static __thread unsigned long fio_fdset = 0; static __thread void* fio_stdin_buffer; @@ -472,7 +471,7 @@ fio_open(fio_location location, const char* path, int mode) } else { - fd = open(path, mode, FILE_PERMISSIONS); + fd = open(path, mode, 
FILE_PERMISSION); } return fd; } @@ -1111,6 +1110,7 @@ fio_read(int fd, void* buf, size_t size) IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); Assert(hdr.cop == FIO_SEND); IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); + errno = hdr.arg; return hdr.size; } @@ -1388,7 +1388,7 @@ fio_sync(fio_location location, const char* path) { int fd; - fd = open(path, O_WRONLY | PG_BINARY, FILE_PERMISSIONS); + fd = open(path, O_WRONLY | PG_BINARY, FILE_PERMISSION); if (fd < 0) return -1; @@ -1709,7 +1709,7 @@ fio_gzopen(fio_location location, const char* path, const char* mode, int level) /* check if file opened for writing */ if (strcmp(mode, PG_BINARY_W) == 0) { - int fd = open(path, O_WRONLY | O_CREAT | O_EXCL | PG_BINARY, FILE_PERMISSIONS); + int fd = open(path, O_WRONLY | O_CREAT | O_EXCL | PG_BINARY, FILE_PERMISSION); if (fd < 0) return NULL; file = gzdopen(fd, mode); @@ -2813,6 +2813,73 @@ fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, return exit_code; } +/* Send open file content + * On error we return FIO_ERROR message with following codes + * FIO_ERROR: + * FILE_MISSING (-1) + * OPEN_FAILED (-2) + * READ_FAILED (-3) + * + * FIO_PAGE + * FIO_SEND_FILE_EOF + * + */ +static bool +fio_send_file_content_impl(int fd, int out, const char* path) +{ + fio_header hdr; + int save_errno; + char *buf = pgut_malloc(CHUNK_SIZE); + size_t read_len = 0; + char *errormsg = NULL; + + /* copy content */ + for (;;) + { + read_len = fio_read_all(fd, buf, CHUNK_SIZE); + + /* report error */ + if (read_len < 0) + { + save_errno = errno; + hdr.cop = FIO_ERROR; + errormsg = pgut_malloc(ERRMSG_MAX_LEN); + hdr.arg = READ_FAILED; + /* Construct the error message */ + snprintf(errormsg, ERRMSG_MAX_LEN, "Cannot read from file '%s': %s", + path, strerror(save_errno)); + hdr.size = strlen(errormsg) + 1; + /* send header and message */ + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(out, errormsg, 
hdr.size), hdr.size); + + free(errormsg); + free(buf); + + return false; + } + + if (read_len > 0) + { + /* send chunk */ + hdr.cop = FIO_PAGE; + hdr.size = read_len; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(out, buf, read_len), read_len); + } + + if (read_len == 0) + break; + } + + /* we are done, send eof */ + hdr.cop = FIO_SEND_FILE_EOF; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + + free(buf); + return true; +} + /* Send file content * On error we return FIO_ERROR message with following codes * FIO_ERROR: @@ -2827,16 +2894,15 @@ fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, static void fio_send_file_impl(int out, const char* path) { - FILE *fp; - fio_header hdr; - char *buf = pgut_malloc(CHUNK_SIZE); - size_t read_len = 0; + int fd; + int save_errno; + fio_header hdr; char *errormsg = NULL; /* open source file for read */ /* TODO: check that file is regular file */ - fp = fopen(path, PG_BINARY_R); - if (!fp) + fd = open(path, O_RDONLY | PG_BINARY); + if (fd < 0) { hdr.cop = FIO_ERROR; @@ -2851,68 +2917,29 @@ fio_send_file_impl(int out, const char* path) } else { + save_errno = errno; hdr.arg = OPEN_FAILED; errormsg = pgut_malloc(ERRMSG_MAX_LEN); /* Construct the error message */ - snprintf(errormsg, ERRMSG_MAX_LEN, "Cannot open file '%s': %s", path, strerror(errno)); + snprintf(errormsg, ERRMSG_MAX_LEN, "Cannot open file '%s': %s", + path, strerror(save_errno)); hdr.size = strlen(errormsg) + 1; } /* send header and message */ IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); if (errormsg) - IO_CHECK(fio_write_all(out, errormsg, hdr.size), hdr.size); - - goto cleanup; - } + { + IO_CHECK(fio_write_all(out, errormsg, hdr.size), hdr.size); + free(errormsg); + } - /* disable stdio buffering */ - setvbuf(fp, NULL, _IONBF, BUFSIZ); - - /* copy content */ - for (;;) - { - read_len = fread(buf, 1, CHUNK_SIZE, fp); - - /* report error */ - if (ferror(fp)) - { - 
hdr.cop = FIO_ERROR; - errormsg = pgut_malloc(ERRMSG_MAX_LEN); - hdr.arg = READ_FAILED; - /* Construct the error message */ - snprintf(errormsg, ERRMSG_MAX_LEN, "Cannot read from file '%s': %s", path, strerror(errno)); - hdr.size = strlen(errormsg) + 1; - /* send header and message */ - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(out, errormsg, hdr.size), hdr.size); - - goto cleanup; - } - - if (read_len > 0) - { - /* send chunk */ - hdr.cop = FIO_PAGE; - hdr.size = read_len; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(out, buf, read_len), read_len); - } - - if (feof(fp)) - break; + return; } - /* we are done, send eof */ - hdr.cop = FIO_SEND_FILE_EOF; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + fio_send_file_content_impl(fd, out, path); -cleanup: - if (fp) - fclose(fp); - pg_free(buf); - pg_free(errormsg); - return; + close(fd); } /* Compile the array of files located on remote machine in directory root */ @@ -3384,7 +3411,7 @@ fio_communicate(int in, int out) SYS_CHECK(closedir(dir[hdr.handle])); break; case FIO_OPEN: /* Open file */ - fd[hdr.handle] = open(buf, hdr.arg, FILE_PERMISSIONS); + fd[hdr.handle] = open(buf, hdr.arg, FILE_PERMISSION); hdr.arg = fd[hdr.handle] < 0 ? errno : 0; hdr.size = 0; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); @@ -3407,9 +3434,11 @@ fio_communicate(int in, int out) buf_size = hdr.arg; buf = (char*)realloc(buf, buf_size); } + errno = 0; rc = read(fd[hdr.handle], buf, hdr.arg); hdr.cop = FIO_SEND; hdr.size = rc > 0 ? rc : 0; + hdr.arg = rc >= 0 ? 
0 : errno; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); if (hdr.size != 0) IO_CHECK(fio_write_all(out, buf, hdr.size), hdr.size); @@ -3471,9 +3500,12 @@ fio_communicate(int in, int out) case FIO_SEND_FILE: fio_send_file_impl(out, buf); break; + case FIO_SEND_FILE_CONTENT: + fio_send_file_content_impl(fd[hdr.handle], out, buf); + break; case FIO_SYNC: /* open file and fsync it */ - tmp_fd = open(buf, O_WRONLY | PG_BINARY, FILE_PERMISSIONS); + tmp_fd = open(buf, O_WRONLY | PG_BINARY, FILE_PERMISSION); if (tmp_fd < 0) hdr.arg = errno; else @@ -3487,6 +3519,13 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); break; + case FIO_SYNC_FILE: + if (fsync(fd[hdr.handle]) == 0) + hdr.arg = 0; + else + hdr.arg = errno; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + break; case FIO_GET_CRC32: /* calculate crc32 for a file */ if (hdr.arg == 1) @@ -3542,3 +3581,1416 @@ fio_communicate(int in, int out) exit(EXIT_FAILURE); } } + +// CLASSES +typedef struct pioError { + fobjErr p; /* parent */ + int _errno; +} pioError; + +typedef struct pioLocalDrive +{ +} pioLocalDrive; + +typedef struct pioRemoteDrive +{ +} pioRemoteDrive; + +typedef struct pioFile +{ + const char *path; + int flags; + bool closed; +} pioFile; + +typedef struct pioLocalFile +{ + pioFile p; + int fd; +} pioLocalFile; + +typedef struct pioRemoteFile +{ + pioFile p; + int handle; + bool asyncMode; + bool asyncEof; + bool didAsync; + err_i asyncError; + /* chunks size is CHUNK_SIZE */ + void* asyncChunk; + ft_bytes_t chunkRest; +} pioRemoteFile; + +typedef struct pioReadFilter { + pioRead_i wrapped; + pioFilter_i filter; + char* buffer; + size_t len; + size_t capa; + bool eof; + bool finished; +} pioReadFilter; + +typedef struct pioWriteFilter { + pioWriteFlush_i wrapped; + pioFilter_i filter; + char* buffer; + size_t capa; + bool finished; +} pioWriteFilter; + +#ifdef HAVE_LIBZ +typedef struct pioGZError { + fobjErr p; /* parent */ + 
int _gzerrno; +} pioGZError; + +typedef struct pioGZCompress { + z_stream strm; + bool finished; +} pioGZCompress; + +typedef struct pioGZDecompress { + z_stream strm; + bool eof; + bool finished; + bool ignoreTruncate; +} pioGZDecompress; + +#define kls__pioGZCompress iface__pioFilter, mth(fobjDispose), iface(pioFilter) +fobj_klass(pioGZCompress); +#define kls__pioGZDecompress iface__pioFilter, mth(fobjDispose), iface(pioFilter) +fobj_klass(pioGZDecompress); +#endif + +static pioDrive_i localDrive; +static pioDrive_i remoteDrive; + +pioDrive_i +pioDriveForLocation(fio_location loc) +{ + if (fio_is_remote(loc)) + return remoteDrive; + else + return localDrive; +} + +/* Base physical file type */ +#define kls__pioFile mth(fobjDispose) +fobj_klass(pioFile); + +static void +pioFile_fobjDispose(VSelf) +{ + Self(pioFile); + + ft_assert(self->closed, "File \"%s\" is disposing unclosed", self->path); + ft_free((void*)self->path); + self->path = NULL; +} + +static bool +common_pioExists(fobj_t self, path_t path, err_i *err) +{ + struct stat buf; + fobj_reset_err(err); + + /* follow symlink ? 
*/ + buf = $(pioStat, self, path, true, err); + if (getErrno(*err) == ENOENT) + { + *err = $noerr(); + return false; + } + if ($noerr(*err) && !S_ISREG(buf.st_mode)) + *err = $err(SysErr, "File {path:q} is not regular", (path, path)); + if ($haserr(*err)) { + *err = $err(SysErr, "Could not check file existance: {cause:$M}", + (cause, (*err).self), (errNo, getErrno(*err)), + (errStr, getErrnoStr(*err))); + } + return $noerr(*err); +} + +/* LOCAL DRIVE */ + +static pioFile_i +pioLocalDrive_pioOpen(VSelf, path_t path, int flags, + int permissions, err_i *err) +{ + int fd; + fobj_reset_err(err); + fobj_t file; + + if (permissions == 0) + fd = open(path, flags, FILE_PERMISSION); + else + fd = open(path, flags, permissions); + if (fd < 0) + { + *err = $syserr("Cannot open file {path:q}", (path, path)); + return (pioFile_i){NULL}; + } + + file = $alloc(pioLocalFile, .fd = fd, + .p = { .path = ft_cstrdup(path), .flags = flags } ); + return bind_pioFile(file); +} + +static struct stat +pioLocalDrive_pioStat(VSelf, path_t path, bool follow_symlink, err_i *err) +{ + struct stat st = {0}; + int r; + fobj_reset_err(err); + + r = follow_symlink ? 
stat(path, &st) : lstat(path, &st); + if (r < 0) + *err = $syserr("Cannot stat file {path:q}", (path, path)); + return st; +} + +#define pioLocalDrive_pioExists common_pioExists + +static err_i +pioLocalDrive_pioRemove(VSelf, path_t path, bool missing_ok) +{ + if (remove_file_or_dir(path) != 0) + { + if (!missing_ok || errno != ENOENT) + return $syserr("Cannot remove {path:q}", (path, path)); + } + return $noerr(); +} + +static err_i +pioLocalDrive_pioRename(VSelf, path_t old_path, path_t new_path) +{ + if (rename(old_path, new_path) != 0) + return $syserr("Cannot rename file {old_path:q} to {new_path:q}", + (old_path, old_path), (new_path, new_path)); + return $noerr(); +} + +static pg_crc32 +pioLocalDrive_pioGetCRC32(VSelf, path_t path, bool compressed, err_i *err) +{ + fobj_reset_err(err); + elog(VERBOSE, "Local Drive calculate crc32 for '%s', compressed=%d", + path, compressed); + if (compressed) + return pgFileGetCRCgz(path, true, true); + else + return pgFileGetCRC(path, true, true); +} + +static bool +pioLocalDrive_pioIsRemote(VSelf) +{ + return false; +} + +/* LOCAL FILE */ + +static err_i +pioLocalFile_pioClose(VSelf, bool sync) +{ + Self(pioLocalFile); + err_i err = $noerr(); + int r; + + ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); + + if (sync && (self->p.flags & O_ACCMODE) != O_RDONLY) + { + r = fsync(self->fd); + if (r < 0) + err = $syserr("Cannot fsync file {path:q}", (path, self->p.path)); + } + r = close(self->fd); + if (r < 0 && $isNULL(err)) + err = $syserr("Cannot close file {path:q}", (path, self->p.path)); + self->fd = -1; + self->p.closed = true; + return err; +} + +static size_t +pioLocalFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) +{ + Self(pioLocalFile); + ssize_t r; + fobj_reset_err(err); + + ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); + + r = read(self->fd, buf.ptr, buf.len); + if (r < 0) + { + *err = $syserr("Cannot read from {path:q}", (path, self->p.path)); + return 0; + } + return 
r; +} + +static size_t +pioLocalFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) +{ + Self(pioLocalFile); + ssize_t r; + fobj_reset_err(err); + + ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); + + if (buf.len == 0) + return 0; + + r = durable_write(self->fd, buf.ptr, buf.len); + if (r < 0) + { + *err = $syserr("Cannot write to file {path:q}", (path, self->p.path)); + return 0; + } + if (r < buf.len) + { + *err = $err(SysErr, "Short write on {path:q}: {writtenSz} < {wantedSz}", + (path, self->p.path), (writtenSz, r), (wantedSz, buf.len)); + } + return r; +} + +static err_i +pioLocalFile_pioFlush(VSelf) +{ + Self(pioLocalFile); + ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); + /* do nothing for unbuffered file */ + return $noerr(); +} + +static err_i +pioLocalFile_pioTruncate(VSelf, size_t sz) +{ + Self(pioLocalFile); + ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); + + if (ftruncate(self->fd, sz) < 0) + return $syserr("Cannot truncate file {path:q}", (path, self->p.path)); + return $noerr(); +} + +static fobjStr* +pioLocalFile_fobjRepr(VSelf) +{ + Self(pioLocalFile); + return $fmt("pioLocalFile({path:q}, fd:{fd}", + (path, $S(self->p.path)), (fd, $I(self->fd))); +} + +/* REMOTE DRIVE */ + +static pioFile_i +pioRemoteDrive_pioOpen(VSelf, path_t path, + int flags, int permissions, + err_i *err) +{ + int i; + fio_header hdr; + unsigned long mask; + fobj_reset_err(err); + fobj_t file; + + mask = fio_fdset; + for (i = 0; (mask & 1) != 0; i++, mask >>= 1); + if (i == FIO_FDMAX) + elog(ERROR, "Descriptor pool for remote files is exhausted, " + "probably too many remote files are opened"); + + hdr.cop = FIO_OPEN; + hdr.handle = i; + hdr.size = strlen(path) + 1; + hdr.arg = flags; + fio_fdset |= 1 << i; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); + + /* check results */ + IO_CHECK(fio_read_all(fio_stdin, &hdr, 
sizeof(hdr)), sizeof(hdr)); + + if (hdr.arg != 0) + { + errno = (int)hdr.arg; + *err = $syserr("Cannot open remote file {path:q}", (path, path)); + fio_fdset &= ~(1 << hdr.handle); + return (pioFile_i){NULL}; + } + file = $alloc(pioRemoteFile, .handle = i, + .p = { .path = ft_cstrdup(path), .flags = flags }); + return bind_pioFile(file); +} + +static struct stat +pioRemoteDrive_pioStat(VSelf, path_t path, bool follow_symlink, err_i *err) +{ + struct stat st = {0}; + fio_header hdr = { + .cop = FIO_STAT, + .handle = -1, + .size = strlen(path) + 1, + .arg = follow_symlink, + }; + fobj_reset_err(err); + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); + + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + ft_dbg_assert(hdr.cop == FIO_STAT); + IO_CHECK(fio_read_all(fio_stdin, &st, sizeof(st)), sizeof(st)); + + if (hdr.arg != 0) + { + errno = (int)hdr.arg; + *err = $syserr("Cannot stat remote file {path:q}", (path, path)); + } + return st; +} + +#define pioRemoteDrive_pioExists common_pioExists + +static err_i +pioRemoteDrive_pioRemove(VSelf, path_t path, bool missing_ok) +{ + fio_header hdr = { + .cop = FIO_REMOVE, + .handle = -1, + .size = strlen(path) + 1, + .arg = missing_ok ? 
1 : 0, + }; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); + + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + ft_dbg_assert(hdr.cop == FIO_REMOVE); + + if (hdr.arg != 0) + { + errno = (int)hdr.arg; + return $syserr("Cannot remove remote file {path:q}", (path, path)); + } + return $noerr(); +} + +static err_i +pioRemoteDrive_pioRename(VSelf, path_t old_path, path_t new_path) +{ + size_t old_path_len = strlen(old_path) + 1; + size_t new_path_len = strlen(new_path) + 1; + fio_header hdr = { + .cop = FIO_RENAME, + .handle = -1, + .size = old_path_len + new_path_len, + .arg = 0, + }; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, old_path, old_path_len), old_path_len); + IO_CHECK(fio_write_all(fio_stdout, new_path, new_path_len), new_path_len); + + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + ft_dbg_assert(hdr.cop == FIO_RENAME); + + if (hdr.arg != 0) + { + errno = (int)hdr.arg; + return $syserr("Cannot rename remote file {old_path:q} to {new_path:q}", + (old_path, old_path), (new_path, new_path)); + } + return $noerr(); +} + +static pg_crc32 +pioRemoteDrive_pioGetCRC32(VSelf, path_t path, bool compressed, err_i *err) +{ + fio_header hdr; + size_t path_len = strlen(path) + 1; + pg_crc32 crc = 0; + fobj_reset_err(err); + + hdr.cop = FIO_GET_CRC32; + hdr.handle = -1; + hdr.size = path_len; + hdr.arg = 0; + + if (compressed) + hdr.arg = 1; + elog(VERBOSE, "Remote Drive calculate crc32 for '%s', hdr.arg=%d", + path, compressed); + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, path, path_len), path_len); + IO_CHECK(fio_read_all(fio_stdin, &crc, sizeof(crc)), sizeof(crc)); + + return crc; +} + +static bool +pioRemoteDrive_pioIsRemote(VSelf) +{ + return true; +} + +/* REMOTE FILE */ + +static err_i 
+pioRemoteFile_pioSync(VSelf) +{ + Self(pioRemoteFile); + + fio_header hdr; + hdr.cop = FIO_SYNC_FILE; + hdr.handle = self->handle; + hdr.arg = 0; + hdr.size = 0; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + + if (hdr.arg != 0) + { + errno = (int)hdr.arg; + return $syserr("Cannot fsync remote file {path:q}", (path, self->p.path)); + } + return $noerr(); +} + +static err_i +pioRemoteFile_pioClose(VSelf, bool sync) +{ + Self(pioRemoteFile); + err_i err = $noerr(); + fio_header hdr; + + ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); + + if (sync && (self->p.flags & O_ACCMODE) != O_RDONLY) + err = pioRemoteFile_pioSync(self); + + hdr = (fio_header){ + .cop = FIO_CLOSE, + .handle = self->handle, + .size = 0, + .arg = 0, + }; + + fio_fdset &= ~(1 << hdr.handle); + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + + /* Wait for response */ + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + ft_dbg_assert(hdr.cop == FIO_CLOSE); + + if (hdr.arg != 0 && $isNULL(err)) + { + errno = (int)hdr.arg; + err = $syserr("Cannot close remote file {path:q}", (path, self->p.path)); + } + + self->p.closed = true; + + return err; +} + +static size_t +pioRemoteFile_pioAsyncRead(VSelf, ft_bytes_t buf, err_i *err) +{ + Self(pioRemoteFile); + fio_header hdr = {0}; + size_t buflen = buf.len; + ft_bytes_t bytes; + fobj_reset_err(err); + + ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); + + if (self->asyncEof) + { + return 0; + } + else if (!self->didAsync) + { + /* start reading */ + hdr.cop = FIO_SEND_FILE_CONTENT; + hdr.handle = self->handle; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + if (self->asyncChunk == NULL) + self->asyncChunk = ft_malloc(CHUNK_SIZE); + self->didAsync = true; + } + + while (buf.len > 0) + { + if (self->chunkRest.len > 0) + { + 
ft_bytes_move(&buf, &self->chunkRest); + continue; + } + + if (buf.len >= CHUNK_SIZE) + bytes = ft_bytes(buf.ptr, CHUNK_SIZE); + else + bytes = ft_bytes(self->asyncChunk, CHUNK_SIZE); + + /* receive data */ + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + + if (hdr.cop == FIO_SEND_FILE_EOF) + { + self->asyncEof = true; + break; + } + else if (hdr.cop == FIO_ERROR) + { + int erno = EIO; + switch ((int)hdr.arg) + { + case FILE_MISSING: + erno = ENOENT; + break; + case OPEN_FAILED: + /* We should be already opened. */ + ft_assert((int)hdr.arg != OPEN_FAILED); + break; + case READ_FAILED: + erno = EIO; + break; + } + /* handle error, reported by the agent */ + if (hdr.size > 0) + { + ft_assert(hdr.size < CHUNK_SIZE); + IO_CHECK(fio_read_all(fio_stdin, self->asyncChunk, hdr.size), hdr.size); + errno = erno; + ft_assert(((char*)self->asyncChunk)[hdr.size] == 0); + *err = $syserr("Cannot async read remote file {path:q}: {remotemsg}", + (remotemsg, self->asyncChunk), + (path, self->p.path)); + break; + } + else + { + errno = erno; + *err = $syserr("Cannot async read remote file {path:q}", (path, self->p.path)); + } + fio_disconnect(); /* discard possible pending data in pipe */ + break; + } + else if (hdr.cop == FIO_PAGE) + { + ft_assert(hdr.size <= CHUNK_SIZE); + IO_CHECK(fio_read_all(fio_stdin, bytes.ptr, hdr.size), hdr.size); + + if (bytes.ptr != buf.ptr) + { + bytes.len = hdr.size; + ft_bytes_move(&buf, &bytes); + self->chunkRest = bytes; + } + else + { + ft_bytes_consume(&buf, hdr.size); + } + } + else + { + /* TODO: fio_disconnect may get assert fail when running after this */ + elog(ERROR, "Remote agent returned message of unexpected type: %i", hdr.cop); + } + } + + return (buflen - buf.len); +} + +static size_t +pioRemoteFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) +{ + Self(pioRemoteFile); + + ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); + + if (self->asyncMode) + return $(pioAsyncRead, self, buf, err); + 
+ fio_header hdr = { + .cop = FIO_READ, + .handle = self->handle, + .size = 0, + .arg = buf.len, + }; + fobj_reset_err(err); + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + ft_dbg_assert(hdr.cop == FIO_SEND); + IO_CHECK(fio_read_all(fio_stdin, buf.ptr, hdr.size), hdr.size); + if (hdr.arg != 0) { + errno = (int)hdr.arg; + *err = $syserr("Cannot read remote file {path:q}", (path, self->p.path)); + return 0; + } + + return hdr.size; +} + +static size_t +pioRemoteFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) +{ + Self(pioRemoteFile); + fio_header hdr; + fobj_reset_err(err); + + ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); + + if (buf.len == 0) + return 0; + + if (self->asyncMode) + return pioAsyncWrite(self, buf, err); + + hdr = (fio_header){ + .cop = FIO_WRITE, + .handle = self->handle, + .size = buf.len, + .arg = 0, + }; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len), buf.len); + + /* check results */ + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + ft_dbg_assert(hdr.cop == FIO_WRITE); + + /* set errno */ + if (hdr.arg != 0) { + errno = (int)hdr.arg; + *err = $syserr("Cannot write remote file {path:q}", (path, self->p.path)); + return 0; + } + + return buf.len; +} + +static err_i +pioRemoteFile_pioFlush(VSelf) +{ + Self(pioRemoteFile); + + ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); + + if (self->asyncMode) + return pioAsyncError(self); + return $noerr(); +} + +static err_i +pioRemoteFile_pioTruncate(VSelf, size_t sz) +{ + Self(pioRemoteFile); + + ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); + + fio_header hdr = { + .cop = FIO_TRUNCATE, + .handle = self->handle, + .size = 0, + .arg = sz, + }; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), 
sizeof(hdr)); + + return $noerr(); +} + +static err_i +pioRemoteFile_pioSetAsync(VSelf, bool async) +{ + Self(pioRemoteFile); + + ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); + + if (!self->asyncMode && async) + { + if ((self->p.flags & O_ACCMODE) == O_RDWR) + return $err(RT, "Could not enable async mode on Read-Write file"); + self->asyncMode = true; + } + else if (self->asyncMode && !async) + { + err_i err = pioAsyncError(self); + self->asyncMode = false; + return err; + } + return $noerr(); +} + +static size_t +pioRemoteFile_pioAsyncWrite(VSelf, ft_bytes_t buf, err_i *err) +{ + Self(pioRemoteFile); + fio_header hdr; + + ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); + + if ($haserr(self->asyncError)) { + *err = self->asyncError; + return 0; + } + + if (buf.len == 0) + return 0; + + hdr = (fio_header){ + .cop = FIO_WRITE_ASYNC, + .handle = self->handle, + .size = buf.len, + .arg = 0, + }; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len), buf.len); + self->didAsync = true; + return buf.len; +} + +static err_i +pioRemoteFile_pioAsyncError(VSelf) +{ + Self(pioRemoteFile); + char *errmsg; + fio_header hdr; + + if ($haserr(self->asyncError) || !self->didAsync) + { + self->didAsync = false; + return self->asyncError; + } + + hdr.cop = FIO_GET_ASYNC_ERROR; + hdr.size = 0; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + + /* check results */ + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + + if (hdr.size == 0) + return $noerr(); + + errmsg = pgut_malloc(ERRMSG_MAX_LEN); + IO_CHECK(fio_read_all(fio_stdin, errmsg, hdr.size), hdr.size); + self->asyncError = $err(SysErr, "{remotemsg}", (remotemsg, errmsg)); + self->didAsync = false; + free(errmsg); + return self->asyncError; +} + +static void +pioRemoteFile_fobjDispose(VSelf) +{ + Self(pioRemoteFile); + $idel(&self->asyncError); + 
ft_free(self->asyncChunk); +} + +static fobjStr* +pioRemoteFile_fobjRepr(VSelf) +{ + Self(pioRemoteFile); + return $fmt("pioRemoteFile({path:q}, hnd:{hnd}, async:{asyncMode}, err:{asyncError})", + (path, $S(self->p.path)), + (hnd, $I(self->handle)), + (asyncMode, $B(self->asyncMode)), + (err, self->asyncError.self)); +} + +pioRead_i +pioWrapReadFilter(pioRead_i fl, pioFilter_i flt, size_t buf_size) +{ + void *buf; + fobj_t wrap; + + buf = ft_malloc(buf_size); + wrap = $alloc(pioReadFilter, + .wrapped = $iref(fl), + .filter = $iref(flt), + .buffer = buf, + .capa = buf_size); + return bind_pioRead(wrap); +} + +static size_t +pioReadFilter_pioRead(VSelf, ft_bytes_t wbuf, err_i *err) +{ + Self(pioReadFilter); + fobj_reset_err(err); + pioTransformResult tr; + size_t wlen = wbuf.len; + ft_bytes_t rbuf; + size_t r; + + if (self->eof && self->finished) + return 0; + + while (wbuf.len > 0) + { + /* feed filter */ + rbuf = ft_bytes(self->buffer, self->len); + while (rbuf.len > 0) + { + tr = $i(pioTransform, self->filter, rbuf, wbuf, err); + if ($haserr(*err)) + return wlen - wbuf.len; + ft_bytes_consume(&rbuf, tr.consumed); + ft_bytes_consume(&wbuf, tr.produced); + + if (tr.produced == 0) /* Probably need more input to produce */ + break; + } + + if (self->eof) + break; + + /* move rest if any */ + if (rbuf.len > 0) + memmove(self->buffer, rbuf.ptr, rbuf.len); + self->len = rbuf.len; + + /* feed buffer */ + rbuf = ft_bytes(self->buffer, self->capa); + ft_bytes_consume(&rbuf, self->len); + ft_assert(rbuf.len > 0); + r = $i(pioRead, self->wrapped, rbuf, err); + if ($haserr(*err)) + return wlen - wbuf.len; + if (r == 0) + self->eof = true; + self->len += r; + } + + while (wbuf.len > 0 && self->eof) + { + r = $i(pioFinish, self->filter, wbuf, err); + if ($haserr(*err)) + return (ssize_t)(wlen - wbuf.len); + ft_bytes_consume(&wbuf, r); + if (r == 0) + { + self->finished = true; + break; + } + } + + return wlen - wbuf.len; +} + +static err_i +pioReadFilter_pioClose(VSelf, bool 
sync) +{ + Self(pioReadFilter); + err_i err = $noerr(); + err_i errcl = $noerr(); + size_t r; + + if (!self->finished) + { + r = $i(pioFinish, self->filter, ft_bytes(NULL, 0), &err); + ft_assert(r == 0); + } + if ($ifdef(errcl =, pioClose, self->wrapped.self, sync)) + err = fobj_err_combine(err, errcl); + return err; +} + +static void +pioReadFilter_fobjDispose(VSelf) +{ + Self(pioReadFilter); + $idel(&self->wrapped); + $idel(&self->filter); + ft_free(self->buffer); +} + +static fobjStr* +pioReadFilter_fobjRepr(VSelf) +{ + Self(pioReadFilter); + return $fmt("pioReadFilter(wrapped: {wrapped}, filter: {filter})", + (wrapped, self->wrapped.self), + (filter, self->filter.self)); +} + +pioWriteFlush_i +pioWrapWriteFilter(pioWriteFlush_i fl, pioFilter_i flt, size_t buf_size) +{ + void *buf; + fobj_t wrap; + + buf = ft_malloc(buf_size); + wrap = $alloc(pioWriteFilter, + .wrapped = $iref(fl), + .filter = $iref(flt), + .buffer = buf, + .capa = buf_size); + return bind_pioWriteFlush(wrap); +} + +static size_t +pioWriteFilter_pioWrite(VSelf, ft_bytes_t rbuf, err_i *err) +{ + Self(pioWriteFilter); + fobj_reset_err(err); + pioTransformResult tr; + size_t rlen = rbuf.len; + ft_bytes_t wbuf; + size_t r; + + while (rbuf.len > 0) + { + wbuf = ft_bytes(self->buffer, self->capa); + while (wbuf.len > 0) + { + tr = $i(pioTransform, self->filter, rbuf, wbuf, err); + if ($haserr(*err)) + return rlen - rbuf.len; + ft_bytes_consume(&rbuf, tr.consumed); + ft_bytes_consume(&wbuf, tr.produced); + + if (tr.produced == 0) /* Probably need more input to produce */ + break; + } + + /* feed writer */ + wbuf = ft_bytes(self->buffer, (char*)wbuf.ptr - (char*)self->buffer); + if (wbuf.len == 0) + { + ft_dbg_assert(rbuf.len == 0); + break; + } + r = $i(pioWrite, self->wrapped, wbuf, err); + if ($haserr(*err)) + return rlen - rbuf.len; + ft_assert(r == wbuf.len); + } + + if (rbuf.len) + { + *err = $err(SysErr, "short write: {writtenSz} < {wantedSz}", + (writtenSz, rlen - rbuf.len), (wantedSz, 
rbuf.len)); + } + return rlen - rbuf.len; +} + +static err_i +pioWriteFilter_pioFlush(VSelf) +{ + Self(pioWriteFilter); + err_i err = $noerr(); + ft_bytes_t wbuf; + size_t r; + + while (!self->finished) + { + wbuf = ft_bytes(self->buffer, self->capa); + while (wbuf.len > 0) + { + r = $i(pioFinish, self->filter, wbuf, &err); + if ($haserr(err)) + return err; + ft_bytes_consume(&wbuf, r); + if (r == 0) + { + self->finished = true; + break; + } + } + + /* feed writer */ + wbuf = ft_bytes(self->buffer, (char*)wbuf.ptr - (char*)self->buffer); + if (wbuf.len == 0) + break; + + ft_assert(wbuf.len > 0); + r = $i(pioWrite, self->wrapped, wbuf, &err); + if ($haserr(err)) + return err; + ft_assert(r == wbuf.len); + } + return err; +} + +static err_i +pioWriteFilter_pioClose(VSelf, bool sync) +{ + Self(pioWriteFilter); + err_i err = $noerr(); + err_i errcl = $noerr(); + size_t r; + + if (!self->finished) + { + r = $i(pioFinish, self->filter, ft_bytes(NULL, 0), &err); + ft_assert(r == 0); + } + if ($ifdef(errcl =, pioClose, self->wrapped.self, sync)) + err = fobj_err_combine(err, errcl); + return err; +} + +static void +pioWriteFilter_fobjDispose(VSelf) +{ + Self(pioWriteFilter); + $idel(&self->wrapped); + $idel(&self->filter); + ft_free(self->buffer); +} + +static fobjStr* +pioWriteFilter_fobjRepr(VSelf) +{ + Self(pioWriteFilter); + return $fmt("pioWriteFilter(wrapped: {wrapped}, filter: {filter})", + (wrapped, self->wrapped.self), + (filter, self->filter.self)); +} + +#ifdef HAVE_LIBZ +static err_i +newGZError(const char *gzmsg, int gzerrno) +{ + if (gzerrno == Z_OK && errno == 0) + return $noerr(); + if (gzerrno == Z_ERRNO) { + return $syserr("System error during GZ"); + } + + return $err(GZ, "GZ error: {gzErrStr}", (gzErrStr, gzmsg), (gzErrNo, gzerrno)); +} + +pioFilter_i +pioGZCompressFilter(int level) +{ + pioGZCompress *gz; + int rc; + + gz = $alloc(pioGZCompress); + rc = deflateInit2(&gz->strm, + level, + Z_DEFLATED, + MAX_WBITS + 16, DEF_MEM_LEVEL, + 
Z_DEFAULT_STRATEGY); + ft_assert(rc == Z_OK, "zlib internal error: %s", gz->strm.msg); + return bind_pioFilter(gz); +} + +pioFilter_i +pioGZDecompressFilter(bool ignoreTruncate) +{ + pioGZDecompress *gz; + int rc; + + gz = $alloc(pioGZDecompress, .ignoreTruncate = ignoreTruncate); + + rc = inflateInit2(&gz->strm, 15 + 16); + ft_assert(rc == Z_OK, "zlib internal error: %s", gz->strm.msg); + return bind_pioFilter(gz); +} + +static pioTransformResult +pioGZCompress_pioTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i *err) +{ + Self(pioGZCompress); + pioTransformResult tr = {0, 0}; + size_t rlen = rbuf.len; + size_t wlen = wbuf.len; + ssize_t rc; + fobj_reset_err(err); + + if (self->finished) + { + *err = $err(RT, "pioGZCompress already finished"); + return tr; + } + + while (rbuf.len > 0 && wbuf.len > 0) + { + self->strm.next_in = (Bytef *)rbuf.ptr; + self->strm.avail_in = rbuf.len; + self->strm.next_out = (Bytef *)wbuf.ptr; + self->strm.avail_out = wbuf.len; + + rc = deflate(&self->strm, Z_NO_FLUSH); + ft_dbg_assert(rc == Z_OK); + + ft_bytes_consume(&wbuf, wbuf.len - self->strm.avail_out); + ft_bytes_consume(&rbuf, rbuf.len - self->strm.avail_in); + } + + tr.produced = wlen - wbuf.len; + tr.consumed = rlen - rbuf.len; + return tr; +} + +static size_t +pioGZCompress_pioFinish(VSelf, ft_bytes_t wbuf, err_i *err) +{ + Self(pioGZCompress); + size_t wlen = wbuf.len; + int rc; + fobj_reset_err(err); + + if (self->finished) + return 0; + + while (wbuf.len > 0) + { + self->strm.avail_in = 0; + self->strm.next_out = (Bytef *)wbuf.ptr; + self->strm.avail_out = wbuf.len; + + rc = deflate(&self->strm, Z_FINISH); + + ft_bytes_consume(&wbuf, wbuf.len - self->strm.avail_out); + + if (rc == Z_STREAM_END) + { + rc = deflateEnd(&self->strm); + ft_dbg_assert(rc == Z_OK); + self->finished = true; + break; + } + ft_dbg_assert(rc == Z_OK); + } + + return wlen - wbuf.len; +} + +static void +pioGZCompress_fobjDispose(VSelf) +{ + Self(pioGZCompress); + int rc; + + if 
(!self->finished) + { + rc = deflateEnd(&self->strm); + ft_dbg_assert(rc == Z_OK || rc == Z_DATA_ERROR); + } +} + +static fobjStr* +pioGZCompress_fobjRepr(VSelf) +{ + Self(pioGZCompress); + return $S("pioGZCompress"); +} + +static pioTransformResult +pioGZDecompress_pioTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i* err) +{ + Self(pioGZDecompress); + pioTransformResult tr = {0, 0}; + size_t rlen = rbuf.len; + size_t wlen = wbuf.len; + int rc; + fobj_reset_err(err); + + if (self->finished) + { + *err = $err(RT, "pioGZDecompress already finished"); + return tr; + } + + if (self->eof) + return tr; + + while (rbuf.len > 0 && wbuf.len > 0) + { + self->strm.next_in = (Bytef *)rbuf.ptr; + self->strm.avail_in = rbuf.len; + self->strm.next_out = (Bytef *)wbuf.ptr; + self->strm.avail_out = wbuf.len; + + rc = inflate(&self->strm, Z_NO_FLUSH); + + ft_bytes_consume(&wbuf, wbuf.len - self->strm.avail_out); + ft_bytes_consume(&rbuf, rbuf.len - self->strm.avail_in); + + if (rc == Z_STREAM_END) + { + self->eof = true; + break; + } + else if (rc != Z_OK) + { + *err = newGZError(self->strm.msg, rc); + break; + } + } + + tr.produced += wlen - wbuf.len; + tr.consumed += rlen - rbuf.len; + return tr; +} + +static size_t +pioGZDecompress_pioFinish(VSelf, ft_bytes_t wbuf, err_i *err) +{ + Self(pioGZDecompress); + size_t wlen = wbuf.len; + int rc; + fobj_reset_err(err); + + if (self->finished) + return 0; + + while (wbuf.len > 0 && !self->eof) + { + self->strm.avail_in = 0; + self->strm.next_out = (Bytef *)wbuf.ptr; + self->strm.avail_out = wbuf.len; + + rc = inflate(&self->strm, Z_SYNC_FLUSH); + + ft_bytes_consume(&wbuf, wbuf.len - self->strm.avail_out); + + if (rc == Z_STREAM_END) + { + self->eof = true; + } + else if (rc == Z_BUF_ERROR && self->ignoreTruncate) + { + self->eof = true; + } + else if (rc != Z_OK) + { + *err = newGZError(self->strm.msg, rc); + break; + } + } + + if (self->eof && !self->finished) + { + rc = inflateEnd(&self->strm); + ft_dbg_assert(rc == Z_OK); + 
self->finished = true; + } + + return wlen - wbuf.len; +} + +static void +pioGZDecompress_fobjDispose(VSelf) +{ + Self(pioGZDecompress); + int rc; + + if (!self->finished) { + rc = inflateEnd(&self->strm); + ft_dbg_assert(rc == Z_OK); + } +} + +static fobjStr* +pioGZDecompress_fobjRepr(VSelf) +{ + Self(pioGZCompress); + return $S("pioGZDecompress"); +} +#endif + +err_i +pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, + pioFilter_i *filters, int nfilters, size_t *copied) +{ + FOBJ_FUNC_ARP(); + size_t _fallback_copied = 0; + err_i err = $noerr(); + void* buf; + int i; + + if (copied == NULL) + copied = &_fallback_copied; + + if ($ifdef(err = , pioSetAsync, src.self) && $haserr(err)) + elog(ERROR, "Cannot enable async mode on source \"%s\": %s", + $irepr(src)->ptr, $errmsg(err)); + + if ($ifdef(err = , pioSetAsync, dest.self) && $haserr(err)) + elog(ERROR, "Cannot enable async mode on destination \"%s\": %s", + $irepr(dest)->ptr, $errmsg(err)); + + for (i = nfilters - 1; i >= 0; i--) + dest = pioWrapWriteFilter(dest, filters[i], OUT_BUF_SIZE); + + buf = ft_malloc(OUT_BUF_SIZE); + + for (;;) + { + size_t read_len = 0; + size_t write_len = 0; + + read_len = $i(pioRead, src, ft_bytes(buf, OUT_BUF_SIZE), &err); + + if ($haserr(err)) + $ireturn(err); + + if (read_len == 0) + break; + + write_len = $i(pioWrite, dest, ft_bytes(buf, read_len), &err); + if (write_len != read_len || $haserr(err)) + { + if ($haserr(err)) + $ireturn(err); + + $ireturn($err(SysErr, "Short write to destination file {path}: {writtenSz} < {wantedSz}", + (path, $irepr(dest)->ptr), + (wantedSz, read_len), (writtenSz, write_len))); + } + } + + /* pioFlush will check for async error if destination was remote */ + err = $i(pioFlush, dest); + if ($haserr(err)) + $ireturn($err(SysErr, "Cannot flush file {path}: {cause}", + (path, $irepr(dest)->ptr), (cause, err.self))); + return $noerr(); +} + +fobj_klass_handle(pioFile); +fobj_klass_handle(pioLocalDrive); +fobj_klass_handle(pioRemoteDrive); 
+fobj_klass_handle(pioLocalFile, inherits(pioFile), mth(fobjRepr)); +fobj_klass_handle(pioRemoteFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); +fobj_klass_handle(pioWriteFilter, mth(fobjDispose, fobjRepr)); +fobj_klass_handle(pioReadFilter, mth(fobjDispose, fobjRepr)); + +#ifdef HAVE_LIBZ +fobj_klass_handle(pioGZCompress, mth(fobjRepr)); +fobj_klass_handle(pioGZDecompress, mth(fobjRepr)); +#endif + +void +init_pio_objects(void) +{ + FOBJ_FUNC_ARP(); + + localDrive = bindref_pioDrive($alloc(pioLocalDrive)); + remoteDrive = bindref_pioDrive($alloc(pioRemoteDrive)); +} \ No newline at end of file diff --git a/src/utils/file.h b/src/utils/file.h index 5639a3e4c..33b5444b3 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -10,6 +10,8 @@ #include #endif +#include + typedef enum { /* message for compatibility check */ @@ -55,7 +57,9 @@ typedef enum FIO_CHECK_POSTMASTER, FIO_GET_ASYNC_ERROR, FIO_WRITE_ASYNC, - FIO_READLINK + FIO_READLINK, + FIO_SYNC_FILE, + FIO_SEND_FILE_CONTENT, } fio_operations; typedef struct @@ -176,4 +180,134 @@ struct datapagemap; /* defined in datapagemap.h */ extern struct datapagemap *fio_get_lsn_map(fio_location location, const char *fullpath, uint32 checksum_version, int n_blocks, XLogRecPtr horizonLsn, BlockNumber segmentno); + +// OBJECTS + +extern void init_pio_objects(void); + +typedef const char* path_t; + +fobj_error_cstr_key(remotemsg); +fobj_error_int_key(writtenSz); +fobj_error_int_key(wantedSz); + +#ifdef HAVE_LIBZ +fobj_error_kind(GZ); +fobj_error_int_key(gzErrNo); +fobj_error_cstr_key(gzErrStr); +#endif + +// File +#define mth__pioClose err_i, (bool, sync) +#define mth__pioClose__optional() (sync, false) +#define mth__pioRead size_t, (ft_bytes_t, buf), (err_i *, err) +#define mth__pioWrite size_t, (ft_bytes_t, buf), (err_i *, err) +#define mth__pioTruncate err_i, (size_t, sz) +#define mth__pioFlush err_i +fobj_method(pioClose); +fobj_method(pioRead); +fobj_method(pioWrite); +fobj_method(pioTruncate); 
+fobj_method(pioFlush); + +#define iface__pioFile mth(pioWrite, pioFlush, pioRead, pioTruncate, pioClose) +#define iface__pioWriteFlush mth(pioWrite, pioFlush) +#define iface__pioWriteCloser mth(pioWrite, pioFlush, pioClose) +#define iface__pioReadCloser mth(pioRead, pioClose) +fobj_iface(pioFile); +fobj_iface(pioWriteFlush); +fobj_iface(pioWriteCloser); +fobj_iface(pioReadCloser); + +// Drive +#define mth__pioOpen pioFile_i, (path_t, path), (int, flags), \ + (int, permissions), (err_i *, err) +#define mth__pioOpen__optional() (permissions, FILE_PERMISSION) +#define mth__pioStat struct stat, (path_t, path), (bool, follow_symlink), \ + (err_i *, err) +#define mth__pioRemove err_i, (path_t, path), (bool, missing_ok) +#define mth__pioRename err_i, (path_t, old_path), (path_t, new_path) +#define mth__pioExists bool, (path_t, path), (err_i *, err) +#define mth__pioGetCRC32 pg_crc32, (path_t, path), (bool, compressed), \ + (err_i *, err) +#define mth__pioIsRemote bool + +fobj_method(pioOpen); +fobj_method(pioStat); +fobj_method(pioRemove); +fobj_method(pioRename); +fobj_method(pioExists); +fobj_method(pioIsRemote); +fobj_method(pioGetCRC32); + +#define iface__pioDrive mth(pioOpen, pioStat, pioRemove, pioRename), \ + mth(pioExists, pioGetCRC32, pioIsRemote) +fobj_iface(pioDrive); + +#define kls__pioLocalDrive iface__pioDrive, iface(pioDrive) +#define kls__pioRemoteDrive iface__pioDrive, iface(pioDrive) +fobj_klass(pioLocalDrive); +fobj_klass(pioRemoteDrive); + +extern pioDrive_i pioDriveForLocation(fio_location location); + +#define pioFile__common_methods mth(pioRead, pioWrite, pioFlush, pioTruncate, pioClose) + +#define kls__pioLocalFile iface__pioFile, iface(pioFile) +fobj_klass(pioLocalFile); + +#define mth__pioSetAsync err_i, (bool, async) +#define mth__pioSetAsync__optional() (async, true) +#define mth__pioAsyncRead size_t, (ft_bytes_t, buf), (err_i*, err) +#define mth__pioAsyncWrite size_t, (ft_bytes_t, buf), (err_i*, err) +#define mth__pioAsyncError err_i 
+fobj_method(pioSetAsync); +fobj_method(pioAsyncRead); +fobj_method(pioAsyncWrite); +fobj_method(pioAsyncError); + +#define kls__pioRemoteFile iface__pioFile, iface(pioFile), \ + mth(pioSetAsync, pioAsyncRead, pioAsyncWrite, pioAsyncError) +fobj_klass(pioRemoteFile); + +// Filter +typedef struct pioTransformResult { + size_t consumed; + size_t produced; +} pioTransformResult; + +#define mth__pioTransform pioTransformResult, (ft_bytes_t, in), \ + (ft_bytes_t, out), \ + (err_i*, err) +fobj_method(pioTransform); +#define mth__pioFinish size_t, (ft_bytes_t, out), (err_i*, err) +fobj_method(pioFinish); + +#define iface__pioFilter mth(pioTransform), opt(pioFinish) +fobj_iface(pioFilter); + +#define kls__pioReadFilter mth(pioRead, pioClose) +#define kls__pioWriteFilter iface__pioWriteFlush, iface(pioWriteFlush), \ + mth(pioClose) +fobj_klass(pioReadFilter); +fobj_klass(pioWriteFilter); + +extern pioWriteFlush_i pioWrapWriteFilter(pioWriteFlush_i fl, + pioFilter_i flt, + size_t buf_size); +extern pioRead_i pioWrapReadFilter(pioRead_i fl, + pioFilter_i flt, + size_t buf_size); + +#ifdef HAVE_LIBZ +extern pioFilter_i pioGZCompressFilter(int level); +extern pioFilter_i pioGZDecompressFilter(bool ignoreTruncate); +#endif + +extern err_i pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, + pioFilter_i *filters, int nfilters, size_t *copied); +#define pioCopy(dest, src, ...) 
({ \ + pioFilter_i _fltrs_[] = {__VA_ARGS__}; \ + pioCopyWithFilters((dest), (src), _fltrs_, ft_arrsz(_fltrs_), NULL); \ +}) #endif From 2abdff382aba3e877592e8d6e13773c787988c35 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 5 Jul 2022 10:49:10 +0300 Subject: [PATCH 024/339] archive.c: reimplement push_file_internal and get_wal_file --- src/archive.c | 1177 ++++++++++++++------------------------------ src/pg_probackup.h | 1 - src/utils/file.c | 160 ------ 3 files changed, 364 insertions(+), 974 deletions(-) diff --git a/src/archive.c b/src/archive.c index 01ea15a1f..2000538f9 100644 --- a/src/archive.c +++ b/src/archive.c @@ -13,23 +13,16 @@ #include "utils/thread.h" #include "instr_time.h" -static int push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_dir, - const char *archive_dir, bool overwrite, bool no_sync, - uint32 archive_timeout); -#ifdef HAVE_LIBZ -static int push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, - const char *archive_dir, bool overwrite, bool no_sync, - int compress_level, uint32 archive_timeout); -#endif +static int push_file_internal(const char *wal_file_name, + const char *pg_xlog_dir, + const char *archive_dir, + bool overwrite, bool no_sync, + bool is_compress, int compress_level, + uint32 archive_timeout); static void *push_files(void *arg); static void *get_files(void *arg); static bool get_wal_file(const char *filename, const char *from_path, const char *to_path, bool prefetch_mode); -static int get_wal_file_internal(const char *from_path, const char *to_path, FILE *out, - bool is_decompress); -#ifdef HAVE_LIBZ -static const char *get_gz_error(gzFile gzf, int errnum); -#endif //static void copy_file_attributes(const char *from_path, // fio_location from_location, // const char *to_path, fio_location to_location, @@ -335,17 +328,10 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, elog(LOG, "pushing file \"%s\"", xlogfile->name); - /* If compression is not 
required, then just copy it as is */ - if (!is_compress) - rc = push_file_internal_uncompressed(xlogfile->name, pg_xlog_dir, - archive_dir, overwrite, no_sync, - archive_timeout); -#ifdef HAVE_LIBZ - else - rc = push_file_internal_gz(xlogfile->name, pg_xlog_dir, archive_dir, - overwrite, no_sync, compress_level, - archive_timeout); -#endif + rc = push_file_internal(xlogfile->name, pg_xlog_dir, + archive_dir, overwrite, no_sync, + is_compress, compress_level, + archive_timeout); /* take '--no-ready-rename' flag into account */ if (!no_ready_rename && archive_status_dir != NULL) @@ -372,538 +358,245 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, return rc; } -/* - * Copy non WAL file, such as .backup or .history file, into WAL archive. - * Such files are not compressed. - * Returns: - * 0 - file was successfully pushed - * 1 - push was skipped because file already exists in the archive and - * has the same checksum - */ -int -push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_dir, - const char *archive_dir, bool overwrite, bool no_sync, - uint32 archive_timeout) +static void +remove_temp_wal_file(pioDrive_i *backup_drive, char *partpath) { - FILE *in = NULL; - int out = -1; - char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */ - char from_fullpath[MAXPGPATH]; - char to_fullpath[MAXPGPATH]; - /* partial handling */ - struct stat st; - char to_fullpath_part[MAXPGPATH]; - int partial_try_count = 0; - int partial_file_size = 0; - bool partial_is_stale = true; - /* remote agent error message */ - char *errmsg = NULL; - - /* from path */ - join_path_components(from_fullpath, pg_xlog_dir, wal_file_name); - canonicalize_path(from_fullpath); - /* to path */ - join_path_components(to_fullpath, archive_dir, wal_file_name); - canonicalize_path(to_fullpath); - - /* Open source file for read */ - in = fopen(from_fullpath, PG_BINARY_R); - if (in == NULL) - elog(ERROR, "Cannot open source file \"%s\": %s", from_fullpath, 
strerror(errno)); - - /* disable stdio buffering for input file */ - setvbuf(in, NULL, _IONBF, BUFSIZ); - - /* open destination partial file for write */ - snprintf(to_fullpath_part, sizeof(to_fullpath_part), "%s.part", to_fullpath); - - /* Grab lock by creating temp file in exclusive mode */ - out = fio_open(FIO_BACKUP_HOST, to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY); - if (out < 0) - { - if (errno != EEXIST) - elog(ERROR, "Failed to open temp WAL file \"%s\": %s", - to_fullpath_part, strerror(errno)); - /* Already existing destination temp file is not an error condition */ - } - else - goto part_opened; - - /* - * Partial file already exists, it could have happened due to: - * 1. failed archive-push - * 2. concurrent archiving - * - * For ARCHIVE_TIMEOUT period we will try to create partial file - * and look for the size of already existing partial file, to - * determine if it is changing or not. - * If after ARCHIVE_TIMEOUT we still failed to create partial - * file, we will make a decision about discarding - * already existing partial file. 
- */ - - while (partial_try_count < archive_timeout) - { - if (fio_stat(FIO_BACKUP_HOST, to_fullpath_part, &st, false) < 0) - { - if (errno == ENOENT) - { - //part file is gone, lets try to grab it - out = fio_open(FIO_BACKUP_HOST, to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY); - if (out < 0) - { - if (errno != EEXIST) - elog(ERROR, "Failed to open temp WAL file \"%s\": %s", - to_fullpath_part, strerror(errno)); - } - else - /* Successfully created partial file */ - break; - } - else - elog(ERROR, "Cannot stat temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); - } - - /* first round */ - if (!partial_try_count) - { - elog(LOG, "Temp WAL file already exists, waiting on it %u seconds: \"%s\"", - archive_timeout, to_fullpath_part); - partial_file_size = st.st_size; - } - - /* file size is changing */ - if (st.st_size > partial_file_size) - partial_is_stale = false; - - sleep(1); - partial_try_count++; - } - /* The possible exit conditions: - * 1. File is grabbed - * 2. File is not grabbed, and it is not stale - * 2. File is not grabbed, and it is stale. - */ - - /* - * If temp file was not grabbed for ARCHIVE_TIMEOUT and temp file is not stale, - * then exit with error. 
- */ - if (out < 0) - { - if (!partial_is_stale) - elog(ERROR, "Failed to open temp WAL file \"%s\" in %i seconds", - to_fullpath_part, archive_timeout); - - /* Partial segment is considered stale, so reuse it */ - elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_part); - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) - elog(ERROR, "Cannot remove stale temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); - - out = fio_open(FIO_BACKUP_HOST, to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY); - if (out < 0) - elog(ERROR, "Cannot open temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); - } - -part_opened: - elog(VERBOSE, "Temp WAL file successfully created: \"%s\"", to_fullpath_part); - /* Check if possible to skip copying */ - if (fileExists(to_fullpath, FIO_BACKUP_HOST)) - { - pg_crc32 crc32_src; - pg_crc32 crc32_dst; - - crc32_src = fio_get_crc32(FIO_DB_HOST, from_fullpath, false); - crc32_dst = fio_get_crc32(FIO_BACKUP_HOST, to_fullpath, false); - - if (crc32_src == crc32_dst) - { - elog(LOG, "WAL file already exists in archive with the same " - "checksum, skip pushing: \"%s\"", from_fullpath); - /* cleanup */ - fclose(in); - fio_close(out); - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) - elog(WARNING, "Cannot remove temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); - return 1; - } - else - { - if (overwrite) - elog(LOG, "WAL file already exists in archive with " - "different checksum, overwriting: \"%s\"", to_fullpath); - else - { - /* Overwriting is forbidden, - * so we must unlink partial file and exit with error. 
- */ - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) - elog(WARNING, "Cannot remove temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); - elog(ERROR, "WAL file already exists in archive with " - "different checksum: \"%s\"", to_fullpath); - } - } - } - - /* copy content */ - errno = 0; - for (;;) - { - size_t read_len = 0; - - read_len = fread(buf, 1, OUT_BUF_SIZE, in); - - if (ferror(in)) - { - int save_errno = errno; - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) - elog(WARNING, "Cannot remove temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); - elog(ERROR, "Cannot read source file \"%s\": %s", - from_fullpath, strerror(save_errno)); - } - - if (read_len > 0 && fio_write_async(out, buf, read_len) != read_len) - { - int save_errno = errno; - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) - elog(WARNING, "Cannot cleanup temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); - elog(ERROR, "Cannot write to destination temp file \"%s\": %s", - to_fullpath_part, strerror(save_errno)); - } - - if (feof(in)) - break; - } - - /* close source file */ - fclose(in); - - /* Writing is asynchronous in case of push in remote mode, so check agent status */ - if (fio_check_error_fd(out, &errmsg)) - { - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) - elog(WARNING, "Cannot cleanup temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); - elog(ERROR, "Cannot write to the remote file \"%s\": %s", - to_fullpath_part, errmsg); - } - - /* close temp file */ - if (fio_close(out) != 0) - { - int save_errno = errno; - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) - elog(WARNING, "Cannot cleanup temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); - elog(ERROR, "Cannot close temp WAL file \"%s\": %s", - to_fullpath_part, strerror(save_errno)); - } - - /* sync temp file to disk */ - if (!no_sync) - { - if (fio_sync(FIO_BACKUP_HOST, to_fullpath_part) != 0) - 
elog(ERROR, "Failed to sync file \"%s\": %s", - to_fullpath_part, strerror(errno)); - } - - elog(VERBOSE, "Rename \"%s\" to \"%s\"", to_fullpath_part, to_fullpath); - - //copy_file_attributes(from_path, FIO_DB_HOST, to_path_temp, FIO_BACKUP_HOST, true); - - /* Rename temp file to destination file */ - if (fio_rename(FIO_BACKUP_HOST, to_fullpath_part, to_fullpath) < 0) - { - int save_errno = errno; - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_part, false) != 0) - elog(WARNING, "Cannot cleanup temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); - elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", - to_fullpath_part, to_fullpath, strerror(save_errno)); - } - - pg_free(buf); - return 0; + err_i remerr = $i(pioRemove, *backup_drive, partpath, false); + if ($haserr(remerr)) + elog(WARNING, "Temp WAL: %s", $errmsg(remerr)); } -#ifdef HAVE_LIBZ /* - * Push WAL segment into archive and apply streaming compression to it. + * Copy non WAL file, such as .backup or .history file, into WAL archive. + * Optionally apply streaming compression to it. 
* Returns: * 0 - file was successfully pushed * 1 - push was skipped because file already exists in the archive and * has the same checksum */ int -push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, - const char *archive_dir, bool overwrite, bool no_sync, - int compress_level, uint32 archive_timeout) +push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, + const char *archive_dir, bool overwrite, bool no_sync, + bool is_compress, int compress_level, + uint32 archive_timeout) { - FILE *in = NULL; - gzFile out = NULL; - char *buf = pgut_malloc(OUT_BUF_SIZE); - char from_fullpath[MAXPGPATH]; - char to_fullpath[MAXPGPATH]; - char to_fullpath_gz[MAXPGPATH]; - - /* partial handling */ - struct stat st; - - char to_fullpath_gz_part[MAXPGPATH]; - int partial_try_count = 0; - int partial_file_size = 0; - bool partial_is_stale = true; - /* remote agent errormsg */ - char *errmsg = NULL; - - /* from path */ - join_path_components(from_fullpath, pg_xlog_dir, wal_file_name); - canonicalize_path(from_fullpath); - /* to path */ - join_path_components(to_fullpath, archive_dir, wal_file_name); - canonicalize_path(to_fullpath); - - /* destination file with .gz suffix */ - snprintf(to_fullpath_gz, sizeof(to_fullpath_gz), "%s.gz", to_fullpath); - /* destination temp file */ - snprintf(to_fullpath_gz_part, sizeof(to_fullpath_gz_part), "%s.part", to_fullpath_gz); - - /* Open source file for read */ - in = fopen(from_fullpath, PG_BINARY_R); - if (in == NULL) - elog(ERROR, "Cannot open source WAL file \"%s\": %s", - from_fullpath, strerror(errno)); - - /* disable stdio buffering for input file */ - setvbuf(in, NULL, _IONBF, BUFSIZ); - - /* Grab lock by creating temp file in exclusive mode */ - out = fio_gzopen(FIO_BACKUP_HOST, to_fullpath_gz_part, PG_BINARY_W, compress_level); - if (out == NULL) - { - if (errno != EEXIST) - elog(ERROR, "Cannot open temp WAL file \"%s\": %s", - to_fullpath_gz_part, strerror(errno)); - /* Already existing destination 
temp file is not an error condition */ - } - else - goto part_opened; - - /* - * Partial file already exists, it could have happened due to: - * 1. failed archive-push - * 2. concurrent archiving - * - * For ARCHIVE_TIMEOUT period we will try to create partial file - * and look for the size of already existing partial file, to - * determine if it is changing or not. - * If after ARCHIVE_TIMEOUT we still failed to create partial - * file, we will make a decision about discarding - * already existing partial file. - */ - - while (partial_try_count < archive_timeout) - { - if (fio_stat(FIO_BACKUP_HOST, to_fullpath_gz_part, &st, false) < 0) - { - if (errno == ENOENT) - { - //part file is gone, lets try to grab it - out = fio_gzopen(FIO_BACKUP_HOST, to_fullpath_gz_part, PG_BINARY_W, compress_level); - if (out == NULL) - { - if (errno != EEXIST) - elog(ERROR, "Failed to open temp WAL file \"%s\": %s", - to_fullpath_gz_part, strerror(errno)); - } - else - /* Successfully created partial file */ - break; - } - else - elog(ERROR, "Cannot stat temp WAL file \"%s\": %s", - to_fullpath_gz_part, strerror(errno)); - } - - /* first round */ - if (!partial_try_count) - { - elog(LOG, "Temp WAL file already exists, waiting on it %u seconds: \"%s\"", - archive_timeout, to_fullpath_gz_part); - partial_file_size = st.st_size; - } - - /* file size is changing */ - if (st.st_size > partial_file_size) - partial_is_stale = false; - - sleep(1); - partial_try_count++; - } - /* The possible exit conditions: - * 1. File is grabbed - * 2. File is not grabbed, and it is not stale - * 2. File is not grabbed, and it is stale. - */ - - /* - * If temp file was not grabbed for ARCHIVE_TIMEOUT and temp file is not stale, - * then exit with error. 
- */ - if (out == NULL) - { - if (!partial_is_stale) - elog(ERROR, "Failed to open temp WAL file \"%s\" in %i seconds", - to_fullpath_gz_part, archive_timeout); - - /* Partial segment is considered stale, so reuse it */ - elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_gz_part); - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) - elog(ERROR, "Cannot remove stale compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); - - out = fio_gzopen(FIO_BACKUP_HOST, to_fullpath_gz_part, PG_BINARY_W, compress_level); - if (out == NULL) - elog(ERROR, "Cannot open temp WAL file \"%s\": %s", - to_fullpath_gz_part, strerror(errno)); - } - -part_opened: - elog(VERBOSE, "Temp WAL file successfully created: \"%s\"", to_fullpath_gz_part); - /* Check if possible to skip copying, - */ - if (fileExists(to_fullpath_gz, FIO_BACKUP_HOST)) - { - pg_crc32 crc32_src; - pg_crc32 crc32_dst; - - /* TODO: what if one of them goes missing? */ - crc32_src = fio_get_crc32(FIO_DB_HOST, from_fullpath, false); - crc32_dst = fio_get_crc32(FIO_BACKUP_HOST, to_fullpath_gz, true); - - if (crc32_src == crc32_dst) - { - elog(LOG, "WAL file already exists in archive with the same " - "checksum, skip pushing: \"%s\"", from_fullpath); - /* cleanup */ - fclose(in); - fio_gzclose(out); - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) - elog(WARNING, "Cannot remove compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); - return 1; - } - else - { - if (overwrite) - elog(LOG, "WAL file already exists in archive with " - "different checksum, overwriting: \"%s\"", to_fullpath_gz); - else - { - /* Overwriting is forbidden, - * so we must unlink partial file and exit with error. 
- */ - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) - elog(WARNING, "Cannot remove compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); - elog(ERROR, "WAL file already exists in archive with " - "different checksum: \"%s\"", to_fullpath_gz); - } - } - } - - /* copy content */ - /* TODO: move to separate function */ - for (;;) - { - size_t read_len = 0; - - read_len = fread(buf, 1, OUT_BUF_SIZE, in); - - if (ferror(in)) - { - int save_errno = errno; - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) - elog(WARNING, "Cannot remove compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); - elog(ERROR, "Cannot read from source file \"%s\": %s", - from_fullpath, strerror(save_errno)); - } - - if (read_len > 0 && fio_gzwrite(out, buf, read_len) != read_len) - { - int save_errno = errno; - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) - elog(WARNING, "Cannot cleanup compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); - elog(ERROR, "Cannot write to compressed temp WAL file \"%s\": %s", - to_fullpath_gz_part, get_gz_error(out, save_errno)); - } - - if (feof(in)) - break; - } - - /* close source file */ - fclose(in); - - /* Writing is asynchronous in case of push in remote mode, so check agent status */ - if (fio_check_error_fd_gz(out, &errmsg)) - { - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) - elog(WARNING, "Cannot cleanup remote compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); - elog(ERROR, "Cannot write to the remote compressed file \"%s\": %s", - to_fullpath_gz_part, errmsg); - } - - /* close temp file, TODO: make it synchronous */ - if (fio_gzclose(out) != 0) - { - int save_errno = errno; - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) - elog(WARNING, "Cannot cleanup compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); - elog(ERROR, "Cannot close 
compressed temp WAL file \"%s\": %s", - to_fullpath_gz_part, strerror(save_errno)); - } - - /* sync temp file to disk */ - if (!no_sync) - { - if (fio_sync(FIO_BACKUP_HOST, to_fullpath_gz_part) != 0) - elog(ERROR, "Failed to sync file \"%s\": %s", - to_fullpath_gz_part, strerror(errno)); - } - - elog(VERBOSE, "Rename \"%s\" to \"%s\"", - to_fullpath_gz_part, to_fullpath_gz); - - //copy_file_attributes(from_path, FIO_DB_HOST, to_path_temp, FIO_BACKUP_HOST, true); - - /* Rename temp file to destination file */ - if (fio_rename(FIO_BACKUP_HOST, to_fullpath_gz_part, to_fullpath_gz) < 0) - { - int save_errno = errno; - if (fio_remove(FIO_BACKUP_HOST, to_fullpath_gz_part, false) != 0) - elog(WARNING, "Cannot cleanup compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); - elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", - to_fullpath_gz_part, to_fullpath_gz, strerror(save_errno)); - } - - pg_free(buf); - - return 0; -} -#endif - + FOBJ_FUNC_ARP(); + pioFile_i in; + pioFile_i out; + char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */ + char from_fullpath[MAXPGPATH]; + char to_fullpath[MAXPGPATH]; + char to_fullpath_part[MAXPGPATH]; +/* partial handling */ + struct stat st; + int partial_try_count = 0; + ssize_t partial_file_size = 0; + bool partial_is_stale = true; + size_t len; + err_i err = $noerr(); + + pioDrive_i db_drive = pioDriveForLocation(FIO_DB_HOST); + pioDrive_i backup_drive = pioDriveForLocation(FIO_BACKUP_HOST); + + /* from path */ + join_path_components(from_fullpath, pg_xlog_dir, wal_file_name); + canonicalize_path(from_fullpath); + /* to path */ + join_path_components(to_fullpath, archive_dir, wal_file_name); + canonicalize_path(to_fullpath); + if (is_compress) + { + /* destination file with .gz suffix */ + len = ft_strlcat(to_fullpath, ".gz", sizeof(to_fullpath)); + if (len >= sizeof(to_fullpath)) + elog(ERROR, "File path too long: \"%s\"", to_fullpath); + } + /* open destination partial file for write */ + len = 
snprintf(to_fullpath_part, sizeof(to_fullpath_part), "%s.part", + to_fullpath); + if (len >= sizeof(to_fullpath)) + elog(ERROR, "File path too long: \"%s\"", to_fullpath); + + /* Open source file for read */ + in = $i(pioOpen, db_drive, from_fullpath, O_RDONLY | PG_BINARY, .err = &err); + if ($haserr(err)) + elog(ERROR, "Source file: %s", $errmsg(err)); + + retry_open: + out = $i(pioOpen, backup_drive, to_fullpath_part, + .flags = O_WRONLY | O_CREAT | O_EXCL | PG_BINARY, + .err = &err); + if ($noerr(err)) + goto part_opened; + + if (getErrno(err) != EEXIST) + /* Already existing destination temp file is not an error condition */ + elog(ERROR, "Temp WAL file: %s", $errmsg(err)); + + /* + * Partial file already exists, it could have happened due to: + * 1. failed archive-push + * 2. concurrent archiving + * + * For ARCHIVE_TIMEOUT period we will try to create partial file + * and look for the size of already existing partial file, to + * determine if it is changing or not. + * If after ARCHIVE_TIMEOUT we still failed to create partial + * file, we will make a decision about discarding + * already existing partial file. + */ + + while (partial_try_count < archive_timeout) + { + FOBJ_LOOP_ARP(); + st = $i(pioStat, backup_drive, .path = to_fullpath_part, + .follow_symlink = false, .err = &err); + if ($haserr(err)) + { + if (getErrno(err) == ENOENT) + //part file is gone, lets try to grab it + goto retry_open; + else + elog(ERROR, "Temp WAL: %s", $errmsg(err)); + } + + /* first round */ + if (!partial_try_count) + { + elog(LOG, + "Temp WAL file already exists, waiting on it %u seconds: \"%s\"", + archive_timeout, to_fullpath_part); + partial_file_size = st.st_size; + } + + /* file size is changing */ + if (st.st_size != partial_file_size) + partial_is_stale = false; + + sleep(1); + partial_try_count++; + } + /* The possible exit conditions: + * 1. File is not grabbed, and it is not stale + * 2. File is not grabbed, and it is stale. 
+ */ + + /* + * If temp file was not grabbed for ARCHIVE_TIMEOUT and temp file is not stale, + * then exit with error. + */ + if (!partial_is_stale) + elog(ERROR, "Failed to open temp WAL file \"%s\" in %i seconds", + to_fullpath_part, archive_timeout); + + /* Partial segment is considered stale, so reuse it */ + elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_part); + err = $i(pioRemove, backup_drive, .path = to_fullpath_part, .missing_ok = false); + if ($haserr(err)) + elog(ERROR, "Temp WAL: %s", $errmsg(err)); + + out = $i(pioOpen, backup_drive, .path = to_fullpath_part, + .flags = O_WRONLY | O_CREAT | O_EXCL | PG_BINARY, + .err = &err); + if ($haserr(err)) + elog(ERROR, "Temp WAL: %s", $errmsg(err)); + + part_opened: + elog(VERBOSE, "Temp WAL file successfully created: \"%s\"", to_fullpath_part); + + if ($i(pioExists, backup_drive, to_fullpath, &err)) + { + pg_crc32 crc32_src; + pg_crc32 crc32_dst; + + crc32_src = $i(pioGetCRC32, db_drive, from_fullpath, + .compressed = false, .err = &err); + if ($haserr(err)) + elog(ERROR, "Cannot count crc32 for source file \"%s\": %s", + from_fullpath, $errmsg(err)); + + crc32_dst = $i(pioGetCRC32, backup_drive, to_fullpath, + .compressed = is_compress, .err = &err); + if ($haserr(err)) + elog(ERROR, "Cannot count crc32 for destination file \"%s\": %s", + to_fullpath, $errmsg(err)); + + if (crc32_src == crc32_dst) + { + elog(LOG, "WAL file already exists in archive with the same " + "checksum, skip pushing: \"%s\"", from_fullpath); + $i(pioClose, in); + $i(pioClose, out); + remove_temp_wal_file(&backup_drive, to_fullpath_part); + return 1; + } + else if (overwrite) + { + elog(LOG, "WAL file already exists in archive with " + "different checksum, overwriting: \"%s\"", + to_fullpath); + } + else + { + $i(pioClose, in); + $i(pioClose, out); + remove_temp_wal_file(&backup_drive, to_fullpath_part); + + elog(ERROR, "WAL file already exists in archive with " + "different checksum: \"%s\"", to_fullpath); + } + } + else 
if ($haserr(err)) + { + elog(ERROR, "%s", $errmsg(err)); + } + + /* enable streaming compression */ + if (is_compress) #ifdef HAVE_LIBZ -/* - * Show error during work with compressed file - */ -static const char * -get_gz_error(gzFile gzf, int errnum) -{ - int gz_errnum; - const char *errmsg; - - errmsg = fio_gzerror(gzf, &gz_errnum); - if (gz_errnum == Z_ERRNO) - return strerror(errnum); - else - return errmsg; -} + { + pioFilter_i flt = pioGZCompressFilter(compress_level); + err = pioCopy($reduce(pioWriteFlush, out), + $reduce(pioRead, in), + flt); + } + else +#else + elog(ERROR, "Compression is requested, but not compiled it"); #endif + { + err = pioCopy($reduce(pioWriteFlush, out), + $reduce(pioRead, in)); + } + + /* close source file */ + $i(pioClose, in); /* ignore error */ + + if ($haserr(err)) + { + $i(pioClose, out); + remove_temp_wal_file(&backup_drive, to_fullpath_part); + elog(ERROR, "Copy WAL: %s", $errmsg(err)); + } + + err = $i(pioClose, out, .sync = !no_sync); + if ($haserr(err)) + { + remove_temp_wal_file(&backup_drive, to_fullpath_part); + elog(ERROR, "Temp WAL: %s", $errmsg(err)); + } + + /* Rename temp file to destination file */ + err = $i(pioRename, backup_drive, to_fullpath_part, to_fullpath); + if ($haserr(err)) + { + remove_temp_wal_file(&backup_drive, to_fullpath_part); + elog(ERROR, "%s", $errmsg(err)); + } + + free(buf); + return 0; +} /* Copy file attributes */ //static void @@ -1360,286 +1053,144 @@ bool get_wal_file(const char *filename, const char *from_fullpath, const char *to_fullpath, bool prefetch_mode) { - int rc = FILE_MISSING; - FILE *out; - char from_fullpath_gz[MAXPGPATH]; - bool src_partial = false; - - snprintf(from_fullpath_gz, sizeof(from_fullpath_gz), "%s.gz", from_fullpath); - - /* open destination file */ - out = fopen(to_fullpath, PG_BINARY_W); - if (!out) - { - elog(WARNING, "Failed to open file '%s': %s", - to_fullpath, strerror(errno)); - return false; - } - - if (chmod(to_fullpath, FILE_PERMISSION) == -1) - { - 
elog(WARNING, "Cannot change mode of file '%s': %s", - to_fullpath, strerror(errno)); - fclose(out); - unlink(to_fullpath); - return false; - } + FOBJ_FUNC_ARP(); + pioFile_i out = {NULL}; + pioFile_i in = {NULL}; + char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */ + err_i err = $noerr(); + char from_fullpath_gz[MAXPGPATH]; + bool compressed = false; + bool src_partial = false; + + pioDrive_i db_drive = pioDriveForLocation(FIO_DB_HOST); + pioDrive_i backup_drive = pioDriveForLocation(FIO_BACKUP_HOST); + + snprintf(from_fullpath_gz, sizeof(from_fullpath_gz), "%s.gz", + from_fullpath); + + /* open destination file */ + out = $i(pioOpen, db_drive, .path = to_fullpath, .err = &err, + .flags = O_WRONLY | O_CREAT | O_EXCL | O_TRUNC | PG_BINARY); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + return false; + } - /* disable buffering for output file */ - setvbuf(out, NULL, _IONBF, BUFSIZ); - /* In prefetch mode, we do look only for full WAL segments - * In non-prefetch mode, do look up '.partial' and '.gz.partial' - * segments. - */ - if (fio_is_remote(FIO_BACKUP_HOST)) - { - char *errmsg = NULL; - /* get file via ssh */ #ifdef HAVE_LIBZ - /* If requested file is regular WAL segment, then try to open it with '.gz' suffix... */ - if (IsXLogFileName(filename)) - rc = fio_send_file_gz(from_fullpath_gz, to_fullpath, out, &errmsg); - if (rc == FILE_MISSING) + /* If requested file is regular WAL segment, then try to open it with '.gz' suffix... */ + if (IsXLogFileName(filename)) + { + in = $i(pioOpen, backup_drive, from_fullpath_gz, O_RDONLY | PG_BINARY, + .err = &err); + compressed = in.self != NULL; + if ($haserr(err) && getErrno(err) != ENOENT) + elog(ERROR, "Source file: %s", $errmsg(err)); + } #endif - /* ... 
failing that, use uncompressed */ - rc = fio_send_file(from_fullpath, to_fullpath, out, NULL, &errmsg); - - /* When not in prefetch mode, try to use partial file */ - if (rc == FILE_MISSING && !prefetch_mode && IsXLogFileName(filename)) - { - char from_partial[MAXPGPATH]; - -#ifdef HAVE_LIBZ - /* '.gz.partial' goes first ... */ - snprintf(from_partial, sizeof(from_partial), "%s.gz.partial", from_fullpath); - rc = fio_send_file_gz(from_partial, to_fullpath, out, &errmsg); - if (rc == FILE_MISSING) -#endif - { - /* ... failing that, use '.partial' */ - snprintf(from_partial, sizeof(from_partial), "%s.partial", from_fullpath); - rc = fio_send_file(from_partial, to_fullpath, out, NULL, &errmsg); - } - - if (rc == SEND_OK) - src_partial = true; - } - - if (rc == WRITE_FAILED) - elog(WARNING, "Cannot write to file '%s': %s", - to_fullpath, strerror(errno)); - - if (errmsg) - elog(WARNING, "%s", errmsg); - - pg_free(errmsg); - } - else - { - /* get file locally */ + if (in.self == NULL) + { + in = $i(pioOpen, backup_drive, from_fullpath, O_RDONLY | PG_BINARY, + .err = &err); + if ($haserr(err) && getErrno(err) != ENOENT) + elog(ERROR, "Source file: %s", $errmsg(err)); + } + /* try partial file */ + if (in.self == NULL && !prefetch_mode && IsXLogFileName(filename)) + { + char from_partial[MAXPGPATH]; #ifdef HAVE_LIBZ - /* If requested file is regular WAL segment, then try to open it with '.gz' suffix... */ - if (IsXLogFileName(filename)) - rc = get_wal_file_internal(from_fullpath_gz, to_fullpath, out, true); - if (rc == FILE_MISSING) + snprintf(from_partial, sizeof(from_partial), "%s.gz.partial", + from_fullpath); + + in = $i(pioOpen, backup_drive, from_partial, O_RDONLY | PG_BINARY, + .err = &err); + compressed = in.self != NULL; + if ($haserr(err) && getErrno(err) != ENOENT) + elog(ERROR, "Source partial file: %s", $errmsg(err)); #endif - /* ... 
failing that, use uncompressed */ - rc = get_wal_file_internal(from_fullpath, to_fullpath, out, false); - /* When not in prefetch mode, try to use partial file */ - if (rc == FILE_MISSING && !prefetch_mode && IsXLogFileName(filename)) - { - char from_partial[MAXPGPATH]; + if (in.self == NULL) + { + snprintf(from_partial, sizeof(from_partial), "%s.partial", + from_fullpath); + in = $i(pioOpen, backup_drive, + .path = from_partial, + .flags = O_RDONLY | PG_BINARY, + .err = &err); + if ($haserr(err) && getErrno(err) != ENOENT) + elog(ERROR, "Source partial file: %s", $errmsg(err)); + } + + src_partial = true; + } + + if (in.self == NULL) + { + $i(pioClose, out); + $i(pioRemove, db_drive, to_fullpath, true); + free(buf); + if (!prefetch_mode) + elog(LOG, "Target WAL file is missing: %s", filename); + return false; + } #ifdef HAVE_LIBZ - /* '.gz.partial' goes first ... */ - snprintf(from_partial, sizeof(from_partial), "%s.gz.partial", from_fullpath); - rc = get_wal_file_internal(from_partial, to_fullpath, out, true); - if (rc == FILE_MISSING) + if (compressed) + { + pioFilter_i flt = pioGZDecompressFilter(src_partial); + err = pioCopy($reduce(pioWriteFlush, out), + $reduce(pioRead, in), + flt); + } + else #endif - { - /* ... failing that, use '.partial' */ - snprintf(from_partial, sizeof(from_partial), "%s.partial", from_fullpath); - rc = get_wal_file_internal(from_partial, to_fullpath, out, false); - } - - if (rc == SEND_OK) - src_partial = true; - } - } - - if (!prefetch_mode && (rc == FILE_MISSING)) - elog(LOG, "Target WAL file is missing: %s", filename); - - if (rc < 0) - { - fclose(out); - unlink(to_fullpath); - return false; - } - - /* If partial file was used as source, then it is very likely that destination - * file is not equal to XLOG_SEG_SIZE - that is the way pg_receivexlog works. - * We must manually extent it up to XLOG_SEG_SIZE. 
- */ - if (src_partial) - { - - if (fflush(out) != 0) - { - elog(WARNING, "Cannot flush file \"%s\": %s", to_fullpath, strerror(errno)); - fclose(out); - unlink(to_fullpath); - return false; - } - - if (ftruncate(fileno(out), xlog_seg_size) != 0) - { - elog(WARNING, "Cannot extend file \"%s\": %s", to_fullpath, strerror(errno)); - fclose(out); - unlink(to_fullpath); - return false; - } - } - - if (fclose(out) != 0) - { - elog(WARNING, "Cannot close file '%s': %s", to_fullpath, strerror(errno)); - unlink(to_fullpath); - return false; - } - - elog(LOG, "WAL file successfully %s: %s", - prefetch_mode ? "prefetched" : "copied", filename); - return true; -} - -/* - * Copy WAL segment with possible decompression from local archive. - * Return codes: - * FILE_MISSING (-1) - * OPEN_FAILED (-2) - * READ_FAILED (-3) - * WRITE_FAILED (-4) - * ZLIB_ERROR (-5) - */ -int -get_wal_file_internal(const char *from_path, const char *to_path, FILE *out, - bool is_decompress) -{ -#ifdef HAVE_LIBZ - gzFile gz_in = NULL; -#endif - FILE *in = NULL; - char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */ - int exit_code = 0; - - elog(VERBOSE, "Attempting to %s WAL file '%s'", - is_decompress ? 
"open compressed" : "open", from_path); - - /* open source file for read */ - if (!is_decompress) - { - in = fopen(from_path, PG_BINARY_R); - if (in == NULL) - { - if (errno == ENOENT) - exit_code = FILE_MISSING; - else - { - elog(WARNING, "Cannot open source WAL file \"%s\": %s", - from_path, strerror(errno)); - exit_code = OPEN_FAILED; - } - goto cleanup; - } - - /* disable stdio buffering */ - setvbuf(out, NULL, _IONBF, BUFSIZ); - } -#ifdef HAVE_LIBZ - else - { - gz_in = gzopen(from_path, PG_BINARY_R); - if (gz_in == NULL) - { - if (errno == ENOENT) - exit_code = FILE_MISSING; - else - { - elog(WARNING, "Cannot open compressed WAL file \"%s\": %s", - from_path, strerror(errno)); - exit_code = OPEN_FAILED; - } - - goto cleanup; - } - } -#endif - - /* copy content */ - for (;;) - { - int read_len = 0; - -#ifdef HAVE_LIBZ - if (is_decompress) - { - read_len = gzread(gz_in, buf, OUT_BUF_SIZE); - - if (read_len <= 0) - { - if (gzeof(gz_in)) - break; - else - { - elog(WARNING, "Cannot read compressed WAL file \"%s\": %s", - from_path, get_gz_error(gz_in, errno)); - exit_code = READ_FAILED; - break; - } - } - } - else -#endif - { - read_len = fread(buf, 1, OUT_BUF_SIZE, in); - - if (ferror(in)) - { - elog(WARNING, "Cannot read source WAL file \"%s\": %s", - from_path, strerror(errno)); - exit_code = READ_FAILED; - break; - } - - if (read_len == 0 && feof(in)) - break; - } - - if (read_len > 0) - { - if (fwrite(buf, 1, read_len, out) != read_len) - { - elog(WARNING, "Cannot write to WAL file '%s': %s", - to_path, strerror(errno)); - exit_code = WRITE_FAILED; - break; - } - } - } - -cleanup: -#ifdef HAVE_LIBZ - if (gz_in) - gzclose(gz_in); -#endif - if (in) - fclose(in); - - pg_free(buf); - return exit_code; + { + err = pioCopy($reduce(pioWriteFlush, out), + $reduce(pioRead, in)); + } + + /* close source file */ + $i(pioClose, in); /* ignore error */ + + if ($haserr(err)) + { + $i(pioClose, out); + $i(pioRemove, db_drive, to_fullpath, true); + elog(ERROR, "%s", 
$errmsg(err)); + } + + /* If partial file was used as source, then it is very likely that destination + * file is not equal to XLOG_SEG_SIZE - that is the way pg_receivexlog works. + * We must manually extent it up to XLOG_SEG_SIZE. + */ + if (src_partial) + { + err = $i(pioTruncate, out, xlog_seg_size); + if ($haserr(err)) + { + elog(WARNING, "Extend file: %s", $errmsg(err)); + $i(pioClose, out); + $i(pioRemove, db_drive, to_fullpath, true); + free(buf); + return false; + } + } + + err = $i(pioClose, out); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + $i(pioRemove, db_drive, to_fullpath, true); + free(buf); + return false; + } + + elog(LOG, "WAL file successfully %s: %s", + prefetch_mode ? "prefetched" : "copied", filename); + free(buf); + return true; } bool next_wal_segment_exists(TimeLineID tli, XLogSegNo segno, const char *prefetch_dir, uint32 wal_seg_size) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 2aec18a56..6f6dcdff6 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1218,7 +1218,6 @@ extern int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pg extern int fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, bool use_pagemap, BlockNumber *err_blknum, char **errormsg); -extern int fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg); extern int fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, pgFile *file, char **errormsg); diff --git a/src/utils/file.c b/src/utils/file.c index 3d74c37f9..7eb2c4329 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2571,166 +2571,6 @@ fio_send_pages_impl(int out, char* buf) return; } -/* Receive chunks of compressed data, decompress them and write to - * destination file. 
- * Return codes: - * FILE_MISSING (-1) - * OPEN_FAILED (-2) - * READ_FAILED (-3) - * WRITE_FAILED (-4) - * ZLIB_ERROR (-5) - * REMOTE_ERROR (-6) - */ -int -fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg) -{ - fio_header hdr; - int exit_code = SEND_OK; - char *in_buf = pgut_malloc(CHUNK_SIZE); /* buffer for compressed data */ - char *out_buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer for decompressed data */ - size_t path_len = strlen(from_fullpath) + 1; - /* decompressor */ - z_stream *strm = NULL; - - hdr.cop = FIO_SEND_FILE; - hdr.size = path_len; - -// elog(VERBOSE, "Thread [%d]: Attempting to open remote compressed WAL file '%s'", -// thread_num, from_fullpath); - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, from_fullpath, path_len), path_len); - - for (;;) - { - fio_header hdr; - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (hdr.cop == FIO_SEND_FILE_EOF) - { - break; - } - else if (hdr.cop == FIO_ERROR) - { - /* handle error, reported by the agent */ - if (hdr.size > 0) - { - IO_CHECK(fio_read_all(fio_stdin, in_buf, hdr.size), hdr.size); - *errormsg = pgut_malloc(hdr.size); - snprintf(*errormsg, hdr.size, "%s", in_buf); - } - exit_code = hdr.arg; - goto cleanup; - } - else if (hdr.cop == FIO_PAGE) - { - int rc; - Assert(hdr.size <= CHUNK_SIZE); - IO_CHECK(fio_read_all(fio_stdin, in_buf, hdr.size), hdr.size); - - /* We have received a chunk of compressed data, lets decompress it */ - if (strm == NULL) - { - /* Initialize decompressor */ - strm = pgut_malloc(sizeof(z_stream)); - memset(strm, 0, sizeof(z_stream)); - - /* The fields next_in, avail_in initialized before init */ - strm->next_in = (Bytef *)in_buf; - strm->avail_in = hdr.size; - - rc = inflateInit2(strm, 15 + 16); - - if (rc != Z_OK) - { - *errormsg = pgut_malloc(ERRMSG_MAX_LEN); - snprintf(*errormsg, ERRMSG_MAX_LEN, - "Failed to initialize decompression stream 
for file '%s': %i: %s", - from_fullpath, rc, strm->msg); - exit_code = ZLIB_ERROR; - goto cleanup; - } - } - else - { - strm->next_in = (Bytef *)in_buf; - strm->avail_in = hdr.size; - } - - strm->next_out = (Bytef *)out_buf; /* output buffer */ - strm->avail_out = OUT_BUF_SIZE; /* free space in output buffer */ - - /* - * From zlib documentation: - * The application must update next_in and avail_in when avail_in - * has dropped to zero. It must update next_out and avail_out when - * avail_out has dropped to zero. - */ - while (strm->avail_in != 0) /* while there is data in input buffer, decompress it */ - { - /* decompress until there is no data to decompress, - * or buffer with uncompressed data is full - */ - rc = inflate(strm, Z_NO_FLUSH); - if (rc == Z_STREAM_END) - /* end of stream */ - break; - else if (rc != Z_OK) - { - /* got an error */ - *errormsg = pgut_malloc(ERRMSG_MAX_LEN); - snprintf(*errormsg, ERRMSG_MAX_LEN, - "Decompression failed for file '%s': %i: %s", - from_fullpath, rc, strm->msg); - exit_code = ZLIB_ERROR; - goto cleanup; - } - - if (strm->avail_out == 0) - { - /* Output buffer is full, write it out */ - if (fwrite(out_buf, 1, OUT_BUF_SIZE, out) != OUT_BUF_SIZE) - { - exit_code = WRITE_FAILED; - goto cleanup; - } - - strm->next_out = (Bytef *)out_buf; /* output buffer */ - strm->avail_out = OUT_BUF_SIZE; - } - } - - /* write out leftovers if any */ - if (strm->avail_out != OUT_BUF_SIZE) - { - int len = OUT_BUF_SIZE - strm->avail_out; - - if (fwrite(out_buf, 1, len, out) != len) - { - exit_code = WRITE_FAILED; - goto cleanup; - } - } - } - else - elog(ERROR, "Remote agent returned message of unexpected type: %i", hdr.cop); - } - -cleanup: - if (exit_code < OPEN_FAILED) - fio_disconnect(); /* discard possible pending data in pipe */ - - if (strm) - { - inflateEnd(strm); - pg_free(strm); - } - - pg_free(in_buf); - pg_free(out_buf); - return exit_code; -} - /* Receive chunks of data and write them to destination file. 
* Return codes: * SEND_OK (0) From a36f6266a5db41fd6b77618e57f8a7971b9541b8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 8 Jul 2022 03:45:37 +0300 Subject: [PATCH 025/339] fu_util: reduce macro expansions --- src/fu_util/fm_util.h | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/src/fu_util/fm_util.h b/src/fu_util/fm_util.h index 5c5691512..37eb681e6 100644 --- a/src/fu_util/fm_util.h +++ b/src/fu_util/fm_util.h @@ -149,19 +149,14 @@ #endif #define fm_eval(...) fm__eval_0(__VA_ARGS__) -#define fm_eval2(...) fm__eval_0(__VA_ARGS__) -#define fm_eval3(...) fm__eval_0(__VA_ARGS__) -#define fm_eval4(...) fm__eval_0(__VA_ARGS__) -#define fm_eval5(...) fm__eval_0(__VA_ARGS__) -#define fm__eval_0(...) fm__eval_1(fm__eval_1(fm__eval_1(fm__eval_1(__VA_ARGS__)))) -#define fm__eval_1(...) fm__eval_2(fm__eval_2(fm__eval_2(__VA_ARGS__))) -#define fm__eval_2(...) fm__eval_3(fm__eval_3(fm__eval_3(__VA_ARGS__))) #ifdef FU_LONG_EVAL -#define fm__eval_3(...) fm__eval_4(fm__eval_4(fm__eval_4(__VA_ARGS__))) -#define fm__eval_4(...) __VA_ARGS__ +#define fm__eval_0(...) fm__eval_1(fm__eval_1(fm__eval_1(fm__eval_1(__VA_ARGS__)))) #else -#define fm__eval_3(...) __VA_ARGS__ +#define fm__eval_0(...) fm__eval_1(fm__eval_1(__VA_ARGS__)) #endif +#define fm__eval_1(...) fm__eval_2(fm__eval_2(__VA_ARGS__)) +#define fm__eval_2(...) fm__eval_3(fm__eval_3(__VA_ARGS__)) +#define fm__eval_3(...) __VA_ARGS__ #define fm_foreach(macro, ...) 
\ fm_when(fm_va_01(__VA_ARGS__))( \ From fcb36602a2bf082b34204300c2d813553ceb0cbf Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 8 Jul 2022 15:59:22 +0300 Subject: [PATCH 026/339] pioFilter: not optional pioFinish --- src/utils/file.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/file.h b/src/utils/file.h index 33b5444b3..6d10bf33d 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -283,7 +283,7 @@ fobj_method(pioTransform); #define mth__pioFinish size_t, (ft_bytes_t, out), (err_i*, err) fobj_method(pioFinish); -#define iface__pioFilter mth(pioTransform), opt(pioFinish) +#define iface__pioFilter mth(pioTransform, pioFinish) fobj_iface(pioFilter); #define kls__pioReadFilter mth(pioRead, pioClose) From e37dd39bdf785363956f2f6f1f1e79bf98d6a295 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 8 Jul 2022 16:10:11 +0300 Subject: [PATCH 027/339] remove opt iface methods + reduce generated boilerplate --- src/fu_util/CMakeLists.txt | 26 ++++--- src/fu_util/fo_obj.h | 3 - src/fu_util/impl/fo_impl.c | 41 +++++++++- src/fu_util/impl/fo_impl.h | 149 +++++++++++++++++++------------------ src/fu_util/test/obj1.c | 4 +- 5 files changed, 132 insertions(+), 91 deletions(-) diff --git a/src/fu_util/CMakeLists.txt b/src/fu_util/CMakeLists.txt index f43152003..6752d5dd2 100644 --- a/src/fu_util/CMakeLists.txt +++ b/src/fu_util/CMakeLists.txt @@ -14,18 +14,20 @@ target_link_libraries(fu_utils PRIVATE Threads::Threads) # Detect for installed beautiful https://github.com/ianlancetaylor/libbacktrace include_directories(.) 
-find_library(LIBBACKTRACE backtrace) -if(LIBBACKTRACE) - set(CMAKE_REQUIRED_LIBRARIES backtrace) - check_c_source_compiles(" - #include - int main(void) { - struct backtrace_state *st = backtrace_create_state(NULL, 0, NULL, NULL); - return 0; - } - " HAVE_LIBBACKTRACE) - if (HAVE_LIBBACKTRACE) - target_compile_definitions(fu_utils PRIVATE HAVE_LIBBACKTRACE) +if(NOT CMAKE_C_COMPILER MATCHES tcc) + find_library(LIBBACKTRACE backtrace) + if(LIBBACKTRACE) + set(CMAKE_REQUIRED_LIBRARIES backtrace) + check_c_source_compiles(" + #include + int main(void) { + struct backtrace_state *st = backtrace_create_state(NULL, 0, NULL, NULL); + return 0; + } + " HAVE_LIBBACKTRACE) + if (HAVE_LIBBACKTRACE) + target_compile_definitions(fu_utils PRIVATE HAVE_LIBBACKTRACE) + endif() endif() endif() diff --git a/src/fu_util/fo_obj.h b/src/fu_util/fo_obj.h index 43fbf5c33..731aec63b 100644 --- a/src/fu_util/fo_obj.h +++ b/src/fu_util/fo_obj.h @@ -347,9 +347,6 @@ extern fobj_klass_handle_t fobj_real_klass_of(fobj_t); #define $ifdef(assignment, meth, self, ...) \ fobj_ifdef(assignment, meth, (self), __VA_ARGS__) -#define $iifdef(assignment, meth, iface, ...) 
\ - fobj_iface_ifdef(assignment, meth, iface, __VA_ARGS__) - #define $bind(iface_type, obj) fobj_bind(iface_type, (obj)) #define $reduce(newiface, iface) fobj_reduce(newiface, (iface)) diff --git a/src/fu_util/impl/fo_impl.c b/src/fu_util/impl/fo_impl.c index 89dacc423..cc85510c0 100644 --- a/src/fu_util/impl/fo_impl.c +++ b/src/fu_util/impl/fo_impl.c @@ -180,7 +180,7 @@ fobj_klass_method_search(fobj_klass_handle_t klass, fobj_method_handle_t meth) { fobj__method_callback_t -fobj_method_search(const fobj_t self, fobj_method_handle_t meth, fobj_klass_handle_t for_child) { +fobj_method_search(const fobj_t self, fobj_method_handle_t meth, fobj_klass_handle_t for_child, bool validate) { fobj_header_t *h; fobj_klass_handle_t klass; fobj__method_callback_t cb = {self, NULL}; @@ -191,10 +191,18 @@ fobj_method_search(const fobj_t self, fobj_method_handle_t meth, fobj_klass_hand ft_assert(meth != fobj__nm_mhandle(fobjDispose)()); } + if (self == NULL) { + if (validate) + ft_assert(self != NULL, "Call '%s' on NULL object", fobj_methods[meth].name); + return cb; + } + h = ((fobj_header_t*)self - 1); assert(h->magic == FOBJ_HEADER_MAGIC); klass = h->klass; ft_dbg_assert(klass > 0 && klass <= atload(&fobj_klasses_n)); + ft_assert((h->flags & FOBJ_DISPOSED) == 0, "Call '%s' on disposed object '%s'", + fobj_methods[meth].name, fobj_klasses[klass].name); if (ft_unlikely(for_child != 0)) { if (ft_unlikely(ft_dbg_enabled())) { @@ -224,6 +232,9 @@ fobj_method_implements(const fobj_t self, fobj_method_handle_t meth) { fobj_header_t *h; fobj_klass_handle_t klass; + if (self == NULL) + return false; + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); if (ft_dbg_enabled()) { ft_assert(meth > 0 && meth <= atload(&fobj_methods_n)); @@ -244,6 +255,34 @@ fobj_method_implements(const fobj_t self, fobj_method_handle_t meth) { return false; } +extern void +fobj__validate_args(fobj_method_handle_t meth, + fobj_t self, + const char** paramnames, + const char *set, + size_t cnt) { + 
fobj_header_t *h; + fobj_klass_handle_t klass; + size_t i; + + ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); + ft_assert(meth > 0 && meth <= atload(&fobj_methods_n)); + ft_assert(meth != fobj__nm_mhandle(fobjDispose)()); + ft_assert(self != NULL, "call '%s' on NULL object", fobj_methods[meth].name); + + h = ((fobj_header_t*)self - 1); + assert(h->magic == FOBJ_HEADER_MAGIC); + klass = h->klass; + ft_dbg_assert(klass > 0 && klass <= atload(&fobj_klasses_n)); + + for (i = 0; i < cnt; i++) { + ft_assert(set[i] != 0, "Calling '%s' on '%s' miss argument '%s'", + fobj_methods[meth].name, + fobj_klasses[klass].name, + paramnames[i]); + } +} + const char * fobj_klass_name(fobj_klass_handle_t klass) { fobj_klass_registration_t *reg; diff --git a/src/fu_util/impl/fo_impl.h b/src/fu_util/impl/fo_impl.h index 3e97c39a8..36597320c 100644 --- a/src/fu_util/impl/fo_impl.h +++ b/src/fu_util/impl/fo_impl.h @@ -16,15 +16,20 @@ typedef uint16_t fobj_method_handle_t; #define fobj__pop_ignore_initializer_overrides \ _Pragma("clang diagnostic pop") #else -#define fobj__push_ignore_initializer_overrides -#define fobj__pop_ignore_initializer_overrides +#define fobj__push_ignore_initializer_overrides \ + _Pragma("GCC diagnostic push"); \ + _Pragma("GCC diagnostic ignored \"-Woverride-init-side-effects\"") +#define fobj__pop_ignore_initializer_overrides \ + _Pragma("GCC diagnostic pop") #endif #ifndef NDEBUG -typedef struct { unsigned char is_set: 1; } *fobj__missing_argument_detector; -#define fobj__dumb_arg ((fobj__missing_argument_detector)(uintptr_t)1) -#define fobj__check_arg(name) ft_dbg_assert(fobj__nm_given(name) != NULL); +typedef struct fobj__missing_argument_detector { + char is_set; +} fobj__missing_argument_detector; +#define fobj__dumb_arg ((fobj__missing_argument_detector){1}) +#define fobj__check_arg(name) fobj__nm_given(name).is_set #else @@ -50,7 +55,6 @@ typedef struct { fm_cat(fobj__map_param_, param) #define fobj__map_param_varsized(...) 
(varsized, __VA_ARGS__) #define fobj__map_param_mth(...) (mth, __VA_ARGS__) -#define fobj__map_param_opt(...) (opt, __VA_ARGS__) #define fobj__map_param_iface(...) (iface, __VA_ARGS__) #define fobj__map_param_inherits(parent) (inherits, parent) @@ -121,14 +125,14 @@ typedef struct { if (h) return h; \ fobj_method_init_impl(&hndl, fm_str(meth)); \ return hndl; \ - } \ + } \ \ typedef res (* impl_meth_t)(fobj_t self comma fobj__mapArgs_toArgs(__VA_ARGS__)); \ \ typedef struct params_t { \ fobj__missing_argument_detector fobj__dumb_first_param; \ fobj__mapArgs_toFields(__VA_ARGS__) \ - } params_t; \ + } params_t; \ \ typedef struct cb_meth_t { \ fobj_t self; \ @@ -136,19 +140,15 @@ typedef struct { } cb_meth_t; \ \ ft_inline cb_meth_t \ - cb_meth(fobj_t self, fobj_klass_handle_t parent) { \ + cb_meth(fobj_t self, fobj_klass_handle_t parent, bool validate) { \ fobj__method_callback_t fnd = {NULL, NULL}; \ - if (self != NULL) { \ - fnd = fobj_method_search(self, handle(), parent); \ - } \ + fnd = fobj_method_search(self, handle(), parent, validate); \ return (cb_meth_t){fnd.self, fnd.impl}; \ } \ \ ft_inline res \ meth(fobj_t self comma fobj__mapArgs_toArgs(__VA_ARGS__)) { \ - cb_meth_t cb = cb_meth(self, fobj_self_klass); \ - ft_assert(cb.impl != NULL && cb.self != NULL); \ - ft_dbg_assert(!fobj__disposed(cb.self)); \ + cb_meth_t cb = cb_meth(self, fobj_self_klass, true); \ return cb.impl(cb.self comma fobj__mapArgs_toNames(__VA_ARGS__)); \ } \ \ @@ -169,18 +169,16 @@ typedef struct { \ ft_inline meth_i \ bind_meth(fobj_t self) { \ - meth_i _iface = (meth_i){.self = self}; \ - ft_assert(cb_meth(self, fobj_self_klass).impl != NULL); \ - return _iface; \ + ft_assert(fobj_method_implements(self, handle())); \ + return (meth_i){self}; \ } \ \ ft_inline bool \ implements_meth(fobj_t self, meth_i *ifacep) { \ - meth_i _iface = (meth_i){.self = self}; \ - cb_meth_t cb = cb_meth(self, fobj_self_klass); \ + bool has = fobj_method_implements(self, handle()); \ if (ifacep != 
NULL) \ - *ifacep = cb.impl != NULL ? _iface : (meth_i){NULL}; \ - return cb.impl != NULL; \ + ifacep->self = has ? self : NULL; \ + return has; \ } \ \ ft_inline meth_i \ @@ -196,10 +194,12 @@ typedef struct { } \ \ ft_inline res \ - invoke_methparams(cb_meth_t cb, params_t params) { \ - ft_assert(cb.impl != NULL && cb.self != NULL); \ - ft_dbg_assert(!fobj__disposed(cb.self)); \ - fobj__assertArgs(__VA_ARGS__) \ + invoke_methparams(cb_meth_t cb, params_t params) {\ + if (!(fobj__assertArgsAnd(__VA_ARGS__))) { \ + const char *params_s[] = { fobj__mapArgs_toNameStrs(__VA_ARGS__) }; \ + char set[] = {fobj__assertArgsVals(__VA_ARGS__)}; \ + fobj__validate_args(handle(), cb.self, params_s, set, ft_arrsz(params_s)); \ + } \ return cb.impl(cb.self comma fobj__mapArgs_toNamedParams(__VA_ARGS__)); \ } \ \ @@ -209,9 +209,14 @@ typedef struct { #define fobj__mapArgs_toArgs(...) \ fm_eval(fm_foreach_tuple_comma(fobj__mapArgs_toArgs_do, __VA_ARGS__)) +#ifndef NDEBUG #define fobj__mapArgs_toFields_do(x, y, ...) \ x y; \ fobj__missing_argument_detector fobj__nm_given(y); +#else +#define fobj__mapArgs_toFields_do(x, y, ...) \ + x y; +#endif #define fobj__mapArgs_toFields(...) \ fm_eval(fm_foreach_tuple(fobj__mapArgs_toFields_do, __VA_ARGS__)) @@ -219,10 +224,30 @@ typedef struct { #define fobj__mapArgs_toNames(...) \ fm_eval(fm_foreach_tuple_comma(fobj__mapArgs_toNames_do, __VA_ARGS__)) +#define fobj__mapArgs_toNameStrs_do(x, y, ...) #y +#define fobj__mapArgs_toNameStrs(...) \ + fm_eval(fm_foreach_tuple_comma(fobj__mapArgs_toNameStrs_do, __VA_ARGS__)) + #define fobj__mapArgs_toNamedParams_do(x, y, ...) params.y #define fobj__mapArgs_toNamedParams(...) \ fm_eval(fm_foreach_tuple_comma(fobj__mapArgs_toNamedParams_do, __VA_ARGS__)) +#ifndef NDEBUG +#define fobj__assertArgsAnd_do(x, y, ...) & fobj__check_arg(params.y) +#define fobj__assertArgsAnd(...) \ + 1 fm_eval(fm_foreach_tuple(fobj__assertArgsAnd_do, __VA_ARGS__)) +#else +#define fobj__assertArgsAnd(...) 
1 +#endif + +#ifndef NDEBUG +#define fobj__assertArgsVals_do(x, y, ...) fobj__check_arg(params.y) +#define fobj__assertArgsVals(...) \ + fm_eval(fm_foreach_tuple_comma(fobj__assertArgsVals_do, __VA_ARGS__)) +#else +#define fobj__assertArgsVals(...) +#endif + #define fobj__assertArgs_do(x, y, ...) fobj__check_arg(params.y) #define fobj__assertArgs(...) \ fm_eval(fm_foreach_tuple(fobj__assertArgs_do, __VA_ARGS__)) @@ -251,28 +276,6 @@ typedef struct { \ fm__dumb_require_semicolon -#if defined(NDEBUG) || defined(__clang__) || defined(__TINYC__) -#define fobj__disposed fobj_disposed -#else -typedef struct fobj__header { -#define FOBJ__HEADER_MAGIC UINT64_C(0x1234567890abcdef) - uint64_t magic; - volatile uint32_t rc; - volatile uint16_t flags; - fobj_klass_handle_t klass; -} fobj__header_t; - -ft_inline bool -fobj__disposed(fobj_t self) { - fobj__header_t *h; - - ft_assert(self != NULL); - h = ((fobj__header_t*)self - 1); - assert(h->magic == FOBJ__HEADER_MAGIC); - return (__atomic_load_n(&h->flags, __ATOMIC_ACQUIRE) & 2) != 0; -} -#endif - /* Klass declarations */ #define fobj__klass_declare(klass) \ @@ -317,7 +320,6 @@ fobj__disposed(fobj_t self) { #define fobj__klass_detect_size_varsized(klass, ...) \ fm_cat(fobj__klass_detect_size_varsized_, fm_va_01(__VA_ARGS__))(klass, __VA_ARGS__) #define fobj__klass_detect_size_mth(...) -#define fobj__klass_detect_size_opt(...) #define fobj__klass_detect_size_inherits(klass, parent) \ kparent = fobj__nm_khandle(parent)(); #define fobj__klass_detect_size_iface(...) @@ -336,8 +338,6 @@ fobj__disposed(fobj_t self) { #define fobj__klass_decl_methods_mth(klass, ...) \ fm_recurs(fobj__klass_decl_method_loop)(klass, __VA_ARGS__) -#define fobj__klass_decl_methods_opt(klass, ...) \ - fm_recurs(fobj__klass_decl_method_loop)(klass, __VA_ARGS__) #define fobj__klass_decl_methods_varsized(...) #define fobj__klass_decl_methods_inherits(klass, parent) #define fobj__klass_decl_methods_iface(...) 
@@ -349,7 +349,6 @@ fobj__disposed(fobj_t self) { #define fobj__klass_has_iface_varsized #define fobj__klass_has_iface_mth -#define fobj__klass_has_iface_opt #define fobj__klass_has_iface_inherits #define fobj__klass_has_iface_iface 1 #define fobj__klass_has_iface_impl(tag, ...) \ @@ -359,7 +358,6 @@ fobj__disposed(fobj_t self) { #define fobj__klass_check_dispatch_varsized(...) #define fobj__klass_check_dispatch_mth(...) -#define fobj__klass_check_dispatch_opt(...) #define fobj__klass_check_dispatch_inherits(...) #define fobj__klass_check_dispatch_iface(klass, ...) \ fm_recurs(fobj__klass_check_dispatch_iface_i)(klass, __VA_ARGS__) @@ -463,7 +461,6 @@ fobj__disposed(fobj_t self) { ((fobj__nm_iface_i(newifacetype)){.self = (oldiface).self}) #endif -#define fobj__mapMethods_toCopyChecks_do_opt(meth) #define fobj__mapMethods_toCopyChecks_do_mth(meth) \ _new_iface_.fobj__nm_has(meth) = _old_iface_.fobj__nm_has(meth); #define fobj__mapMethods_toCopyChecks_loop(tag, ...) \ @@ -483,7 +480,6 @@ fobj__disposed(fobj_t self) { #define fobj__macroIsIface(iface) \ fm_is_empty(fm_eval(fobj__macroIsIface_i(fobj__nm_iface(iface)))) #define fobj__macroIsIface_mth(...) -#define fobj__macroIsIface_opt(...) #define fobj__macroIsIface_do(x) \ fobj__macroIsIface_##x #define fobj__macroIsIface_i(...) \ @@ -507,7 +503,6 @@ fobj__disposed(fobj_t self) { #define fobj__mapMethods_toFields(...) \ fm_eval_tuples(fobj__mapMethods_toFields_do, __VA_ARGS__) -#define fobj__mapMethods_toSetters_do_opt(meth) #define fobj__mapMethods_toSetters_do_mth(meth) \ ft_assert(fobj_method_implements(self, fobj__nm_mhandle(meth)())); #define fobj__mapMethods_toSetters_loop(tag, ...) \ @@ -517,7 +512,6 @@ fobj__disposed(fobj_t self) { #define fobj__mapMethods_toSetters(...) 
\ fm_eval_tuples(fobj__mapMethods_toSetters_do, __VA_ARGS__) -#define fobj__mapMethods_toIfSetters_do_opt(meth) #define fobj__mapMethods_toIfSetters_do_mth(meth) \ if (!fobj_method_implements(self, fobj__nm_mhandle(meth)())) all_ok = false; #define fobj__mapMethods_toIfSetters_loop(tag, ...) \ @@ -527,7 +521,6 @@ fobj__disposed(fobj_t self) { #define fobj__mapMethods_toIfSetters(...) \ fm_eval_tuples(fobj__mapMethods_toIfSetters_do, __VA_ARGS__) -#define fobj__kvalidateMethods_do_opt(meth) #define fobj__kvalidateMethods_do_mth(meth) \ ft_assert(fobj_klass_method_search(khandle, fobj__nm_mhandle(meth)()) != NULL); #define fobj__kvalidateMethods_loop(tag, ...) \ @@ -540,10 +533,10 @@ fobj__disposed(fobj_t self) { /* Method invocation */ #define fobj_call(meth, self, ...) \ - fobj__nm_invoke(meth)(fobj__nm_cb(meth)(self, fobj_self_klass), fobj_pass_params(meth, __VA_ARGS__)) + fobj__nm_invoke(meth)(fobj__nm_cb(meth)(self, fobj_self_klass, true), fobj_pass_params(meth, __VA_ARGS__)) #define fobj_call_super(meth, _klassh, self, ...) \ - fobj__nm_invoke(meth)(fobj__nm_cb(meth)(self, _klassh), fobj_pass_params(meth, __VA_ARGS__)) + fobj__nm_invoke(meth)(fobj__nm_cb(meth)(self, _klassh, true), fobj_pass_params(meth, __VA_ARGS__)) #define fobj_iface_call(meth, iface, ...) \ fobj_call(meth, (fobj_t)(iface).fobj__nm_has(meth), __VA_ARGS__) @@ -563,16 +556,13 @@ fobj__disposed(fobj_t self) { fobj__nm_invoke(meth), __VA_ARGS__) #define fobj__ifdef_impl(assignment, meth, self_, cb, cb_meth, cb_meth_t, \ invoke_meth__params, ...) ({ \ - cb_meth_t cb = cb_meth(self_, fobj_self_klass); \ + cb_meth_t cb = cb_meth(self_, fobj_self_klass, false); \ if (cb.impl != NULL) { \ assignment invoke_meth__params(cb, fobj_pass_params(meth, __VA_ARGS__)); \ } \ cb.impl != NULL; \ }) -#define fobj_iface_ifdef(assignment, meth, iface, ...) 
\ - fobj_ifdef(assignment, meth, (fobj_t)(iface).fobj__nm_has(meth), __VA_ARGS__) - /* Named params passing hazzles with optional and defaults */ #define fobj_pass_params(meth, ...) \ @@ -580,27 +570,31 @@ fobj__disposed(fobj_t self) { meth, fobj__nm_params_t(meth), __VA_ARGS__) #define fobj__pass_params_impl_1(meth, meth__params_t, ...) \ ((meth__params_t){fobj__params_defaults(meth)}) -#ifndef __clang__ +#if !defined(NDEBUG) && !defined(__TINYC__) #define fobj__pass_params_impl_0(meth, meth__params_t, ...) \ - ((meth__params_t){\ + ({ \ + fobj__push_ignore_initializer_overrides; \ + (meth__params_t) { \ fobj__params_defaults(meth), \ fm_eval(fm_foreach_comma(fobj__pass_params_each, __VA_ARGS__)) \ + }; \ + fobj__pop_ignore_initializer_overrides; \ }) #else #define fobj__pass_params_impl_0(meth, meth__params_t, ...) \ - ({ \ - fobj__push_ignore_initializer_overrides; \ - meth__params_t _this_is_params = { \ + ((meth__params_t){\ fobj__params_defaults(meth), \ fm_eval(fm_foreach_comma(fobj__pass_params_each, __VA_ARGS__)) \ - }; \ - fobj__pop_ignore_initializer_overrides; \ - _this_is_params; \ }) #endif +#ifndef NDEBUG #define fobj__pass_params_each(param) \ param, fobj__dumb_arg +#else +#define fobj__pass_params_each(param) \ + param +#endif #define fobj__params_defaults(meth) \ fobj__params_defaults_i(meth, fobj__nm_mthdflt(meth)()) \ @@ -611,9 +605,14 @@ fobj__disposed(fobj_t self) { ) #define fobj__params_defaults_impl(...) \ fm_eval(fm_foreach_tuple(fobj__params_defaults_each, __VA_ARGS__)) +#ifndef NDEBUG #define fobj__params_defaults_each(x, ...) \ fm_when(fm_isnt_empty(__VA_ARGS__))( .x = __VA_ARGS__, )\ .fobj__nm_given(x) = fobj__dumb_arg, +#else +#define fobj__params_defaults_each(x, ...) 
\ + fm_when(fm_isnt_empty(__VA_ARGS__))( .x = __VA_ARGS__, ) +#endif #define fobj_bind(iface, obj) fobj__nm_bind(iface)(obj) @@ -642,7 +641,8 @@ typedef struct fobj__method_callback { } fobj__method_callback_t; extern fobj__method_callback_t fobj_method_search(fobj_t self, fobj_method_handle_t meth, - fobj_klass_handle_t for_child_take_parent); + fobj_klass_handle_t for_child_take_parent, + bool validate); extern bool fobj_method_implements(fobj_t self, fobj_method_handle_t meth); @@ -650,6 +650,9 @@ extern bool fobj_method_implements(fobj_t self, extern void* fobj_klass_method_search(fobj_klass_handle_t klass, fobj_method_handle_t meth); +extern void fobj__validate_args(fobj_method_handle_t meth, fobj_t self, + const char** paramnames, const char *set, size_t cnt); + /* Variable set helpers */ #ifndef NDEBUG diff --git a/src/fu_util/test/obj1.c b/src/fu_util/test/obj1.c index 26a108b37..432f12761 100644 --- a/src/fu_util/test/obj1.c +++ b/src/fu_util/test/obj1.c @@ -19,7 +19,7 @@ fobj_method(ioStatus); fobj_method(fobjGetError); #define iface__ioReader mth(ioRead) -#define iface__ioReadCloser iface__ioReader, mth(ioClose), opt(ioStatus) +#define iface__ioReadCloser iface__ioReader, mth(ioClose, ioStatus) #define iface__obj fobj_iface(ioReadCloser); fobj_iface(ioReader); @@ -232,7 +232,7 @@ int main(int argc, char** argv) { Klass0 *k0 = $alloc(Klass0); aird = bind_ioRead(k0); - ioRead__cb k0_ioRead = fetch_cb_ioRead(k0, fobj_self_klass); + ioRead__cb k0_ioRead = fetch_cb_ioRead(k0, fobj_self_klass, true); for (i = 0; i < benchcnt; i++) { switch (benchmode) { case 0: ioRead(k0, b, 100); break; From e0df06fa8e894c92e2296abcbebd8381eb336384 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 11 Jul 2022 06:25:10 +0300 Subject: [PATCH 028/339] fu update: rework error keywords: make them look like function call --- src/fu_util/fm_util.h | 32 +++++++------ src/fu_util/fo_obj.h | 45 ++++++++++-------- src/fu_util/impl/fo_impl.c | 48 ++++++++++++------- 
src/fu_util/impl/fo_impl.h | 2 +- src/fu_util/impl/fo_impl2.h | 35 +++++++------- src/utils/file.c | 92 +++++++++++++++++++------------------ 6 files changed, 137 insertions(+), 117 deletions(-) diff --git a/src/fu_util/fm_util.h b/src/fu_util/fm_util.h index 37eb681e6..54eb0b2b8 100644 --- a/src/fu_util/fm_util.h +++ b/src/fu_util/fm_util.h @@ -45,12 +45,14 @@ #define fm__xor_11 0 #define fm_if(x, y, ...) fm_cat(fm__if_, x)(y, __VA_ARGS__) -#define fm_iif(x) fm_cat(fm__if_, x) #define fm__if_1(y, ...) y #define fm__if_0(y, ...) __VA_ARGS__ #define fm_when(x) fm_cat(fm__when_, x) #define fm__when_1(...) __VA_ARGS__ #define fm__when_0(...) +#define fm_iif(x) fm_cat(fm__iif_, x) +#define fm__iif_1(...) __VA_ARGS__ fm_empty +#define fm__iif_0(...) fm_expand #define fm_va_comma(...) \ fm_cat(fm__va_comma_, fm_va_01(__VA_ARGS__))() @@ -58,15 +60,12 @@ #define fm__va_comma_1() , #define fm_or_default(...) \ - fm_cat(fm__or_default_, fm_va_01(__VA_ARGS__))(__VA_ARGS__) -#define fm__or_default_0(...) fm_expand -#define fm__or_default_1(...) __VA_ARGS__ fm_empty + fm_iif(fm_va_01(__VA_ARGS__))(__VA_ARGS__) #define fm__primitive_compare(x, y) fm_is_tuple(COMPARE_##x(COMPARE_##y)(())) #define fm__is_comparable(x) fm_is_tuple(fm_cat(COMPARE_,x)(())) #define fm_not_equal(x, y) \ - fm_iif(fm_and(fm__is_comparable(x),fm__is_comparable(y))) \ - (fm__primitive_compare, 1 fm_empty)(x, y) + fm_if(fm_and(fm__is_comparable(x),fm__is_comparable(y)), fm__primitive_compare, 1 fm_empty)(x, y) #define fm_equal(x, y) \ fm_compl(fm_not_equal(x, y)) @@ -133,10 +132,11 @@ // recursion handle #define fm_defer(id) id fm_empty() -#define fm_recurs(id) id fm_defer(fm_empty)() +#define fm_recurs(id) id fm_empty fm_empty() () +#define fm_recurs2(a,b) fm_cat fm_empty fm_empty() () (a,b) #if __STRICT_ANSI__ -#define fm__is_emptyfirst(x, ...) fm_iif(fm_is_tuple(x))(0, fm__is_emptyfirst_impl(x)) +#define fm__is_emptyfirst(x, ...) 
fm_if(fm_is_tuple(x), 0, fm__is_emptyfirst_impl(x)) #define fm__is_emptyfirst_impl(x,...) fm_tuple_2((\ fm__is_emptyfirst_do1 x (fm__is_emptyfirst_do2), 1, 0)) #define fm__is_emptyfirst_do1(F) F() @@ -161,7 +161,7 @@ #define fm_foreach(macro, ...) \ fm_when(fm_va_01(__VA_ARGS__))( \ fm_apply_1(macro, __VA_ARGS__) \ - fm_recurs(fm_cat) (fm_, foreach) (\ + fm_recurs2(fm_, foreach) (\ macro, fm_tail(__VA_ARGS__) \ ) \ ) @@ -169,17 +169,16 @@ #define fm_foreach_arg(macro, arg, ...) \ fm_when(fm_va_01(__VA_ARGS__))( \ fm_apply_2(macro, arg, __VA_ARGS__) \ - fm_recurs(fm_cat) (fm_, foreach_arg) (\ + fm_recurs2(fm_, foreach_arg) (\ macro, arg, fm_tail(__VA_ARGS__) \ ) \ ) -#define fm_catx(x, y) fm_cat_impl(x, y) #define fm_foreach_comma(macro, ...) \ fm_when(fm_va_01(__VA_ARGS__))( \ fm_apply_1(macro, __VA_ARGS__\ )fm_if(fm_va_single(__VA_ARGS__), , fm__comma)\ - fm_recurs(fm_catx) (fm_, foreach_comma) (\ + fm_recurs2(fm_, foreach_comma) (\ macro, fm_tail(__VA_ARGS__) \ ) \ ) @@ -188,7 +187,7 @@ #define fm_foreach_tuple(macro, ...) \ fm_when(fm_va_01(__VA_ARGS__))( \ fm_apply_tuple_1(macro, __VA_ARGS__) \ - fm_recurs(fm_cat) (fm_, foreach_tuple) (\ + fm_recurs2(fm_, foreach_tuple) (\ macro, fm_tail(__VA_ARGS__) \ ) \ ) @@ -196,7 +195,7 @@ #define fm_foreach_tuple_arg(macro, arg, ...) \ fm_when(fm_va_01(__VA_ARGS__))( \ fm_apply_tuple_2(macro, arg, __VA_ARGS__) \ - fm_recurs(fm_cat) (fm_, foreach_tuple_arg) (\ + fm_recurs2(fm_, foreach_tuple_arg) (\ macro, arg, fm_tail(__VA_ARGS__) \ ) \ ) @@ -205,7 +204,7 @@ fm_when(fm_va_01(__VA_ARGS__))( \ fm_apply_tuple_1(macro, __VA_ARGS__\ )fm_if(fm_va_single(__VA_ARGS__), fm_empty(), fm__comma)\ - fm_recurs(fm_cat) (fm_, foreach_tuple_comma) (\ + fm_recurs2(fm_, foreach_tuple_comma) (\ macro, fm_tail(__VA_ARGS__) \ ) \ ) @@ -214,6 +213,9 @@ #define fm_eval_foreach(macro, ...) \ fm_eval(fm_foreach(macro, __VA_ARGS__)) +#define fm_eval_foreach_comma(macro, ...) 
\ + fm_eval(fm_foreach_comma(macro, __VA_ARGS__)) + #define fm_eval_foreach_arg(macro, arg, ...) \ fm_eval(fm_foreach_arg(macro, arg, __VA_ARGS__)) diff --git a/src/fu_util/fo_obj.h b/src/fu_util/fo_obj.h index 731aec63b..6e196b91c 100644 --- a/src/fu_util/fo_obj.h +++ b/src/fu_util/fo_obj.h @@ -194,21 +194,10 @@ extern fobj_t fobj_swap(fobj_t* var, fobj_t newval); /* * fobjDispose should finish all object's activity and release resources. - * It is called automatically before destroying object, but could be - * called manually as well using `fobj_dispose` function. `fobjDispose` could - * not be called directly. - * Therefore after fobjDispose object should be accessible, ie call of any - * method should not be undefined. But it should not be usable, ie should - * not do any meaningful job. + * It is called automatically before destroying object. */ #define mth__fobjDispose void fobj__special_void_method(fobjDispose); -#define $dispose(obj) fobj_dispose(obj) -extern void fobj_dispose(fobj_t); - -/* check if object is disposing or was disposed */ -extern bool fobj_disposing(fobj_t); -extern bool fobj_disposed(fobj_t); /* * returns globally allocated klass name. 
@@ -474,7 +463,7 @@ typedef struct fobjBool { bool b; } fobjBool; -ft_inline fobjBool* fobj_bool(bool f); +extern fobjBool* fobj_bool(bool f); #define $B(f) fobj_bool(f) #define kls__fobjBool mth(fobjRepr, fobjFormat) @@ -507,7 +496,10 @@ extern fobjStr* fobj_printkv(const char *fmt, ft_slc_fokv_t kv); * ERRORS */ -#define iface__err +#define mth___fobjErr_marker_DONT_IMPLEMENT_ME void +fobj__special_void_method(_fobjErr_marker_DONT_IMPLEMENT_ME); + +#define iface__err mth(_fobjErr_marker_DONT_IMPLEMENT_ME) fobj_iface(err); #define fobj_error_kind(err) fobj__error_kind(err) @@ -526,8 +518,8 @@ fobj_error_kind(SysErr); fobj_error_object_key(cause); fobj_error_int_key(errNo); -fobj_error_cstr_key(errStr); -#define fobj_errno_keys(errno) (errNo, errno), (errStr, ft_strerror(errno)) +fobj_error_cstr_key(errNoStr); +#define fobj_errno_keys(errno) (errNo, errno), (errNoStr, ft_strerror(errno)) fobj_error_cstr_key(path); fobj_error_cstr_key(old_path); fobj_error_cstr_key(new_path); @@ -541,19 +533,32 @@ fobj_error_cstr_key(__msgSuffix); * $err(Type, "Some bad thing happens at {path}", (path, filename)) */ #define $err(type, ...) fobj_make_err(type, __VA_ARGS__) +/* + * $noerr() - empty error + * $noerr(err) - true, if $isNULL(err) + */ #define $noerr(...) fm_if(fm_va_01(__VA_ARGS__), $isNULL(__VA_ARGS__), $null(err)) +/* + * $haserr(err) - true if $notNULL(err) + */ #define $haserr(err) $notNULL(err) /* - * $syserr() - * $syserr("allocation error") - * $syserr("Could not open file {path}", (path, filename)) + * $syserr(errno) + * $syserr(errno, "allocation error") + * $syserr(errno, "Could not open file {path}", (path, filename)) */ -#define $syserr(...) fobj_make_syserr(__VA_ARGS__) +#define $syserr(erno, ...) fobj_make_syserr((erno), __VA_ARGS__) /* fetch key back */ #define $errkey(key, err, ...) 
fobj__err_getkey(key, err, __VA_ARGS__) +/* + * Get errno stored in `errNo` error key + */ ft_inline int getErrno(err_i err); +/* + * Get errno string stored in `errNoStr` error key + */ ft_inline const char* getErrnoStr(err_i err); /* diff --git a/src/fu_util/impl/fo_impl.c b/src/fu_util/impl/fo_impl.c index cc85510c0..fab4cffee 100644 --- a/src/fu_util/impl/fo_impl.c +++ b/src/fu_util/impl/fo_impl.c @@ -511,8 +511,8 @@ fobj__do_dispose(fobj_t self, fobj_header_t *h, fobj_klass_registration_t *kreg) } } -void -fobj_dispose(fobj_t self) { +static void +fobj_release(fobj_t self) { fobj_header_t *h; fobj_klass_handle_t klass; fobj_klass_registration_t *kreg; @@ -528,11 +528,17 @@ fobj_dispose(fobj_t self) { ft_dbg_assert(klass > 0 && klass <= atload(&fobj_klasses_n)); kreg = &fobj_klasses[klass]; + + if (__atomic_sub_fetch(&h->rc, 1, __ATOMIC_ACQ_REL) != 0) + return; + if ((atload(&h->flags) & FOBJ_DISPOSING) != 0) + return; fobj__do_dispose(self, h, kreg); } -static void -fobj_release(fobj_t self) { +#if 0 +void +fobj_dispose(fobj_t self) { fobj_header_t *h; fobj_klass_handle_t klass; fobj_klass_registration_t *kreg; @@ -548,11 +554,6 @@ fobj_release(fobj_t self) { ft_dbg_assert(klass > 0 && klass <= atload(&fobj_klasses_n)); kreg = &fobj_klasses[klass]; - - if (__atomic_sub_fetch(&h->rc, 1, __ATOMIC_ACQ_REL) != 0) - return; - if ((atload(&h->flags) & FOBJ_DISPOSING) != 0) - return; fobj__do_dispose(self, h, kreg); } @@ -580,6 +581,8 @@ fobj_disposed(fobj_t self) { return (atload(&h->flags) & FOBJ_DISPOSED) != 0; } +#endif + static fobj_klass_handle_t fobjBase_fobjKlass(fobj_t self) { return fobj_real_klass_of(self); @@ -844,7 +847,7 @@ fobjUInt_fobjFormat(VSelf, ft_strbuf_t *buf, const char *fmt) { static fobjStr* fobjFloat_fobjRepr(VSelf) { Self(fobjFloat); - return fobj_sprintf("%f", self->f); + return fobj_sprintf("$F(%f)", self->f); } static void @@ -865,10 +868,15 @@ fobjFloat_fobjFormat(VSelf, ft_strbuf_t *buf, const char *fmt) { fobj_format_float(buf, 
self->f, fmt); } -static fobjStr* trueRepr = NULL; -static fobjStr* falseRepr = NULL; -static fobjStr* trueStr = NULL; -static fobjStr* falseStr = NULL; +static fobjBool* fobjTrue = NULL; +static fobjBool* fobjFalse = NULL; +static fobjStr* trueRepr = NULL; +static fobjStr* falseRepr = NULL; + +fobjBool* +fobj_bool(bool b) { + return b ? fobjTrue : fobjFalse; +} static fobjStr* fobjBool_fobjRepr(VSelf) { @@ -1080,6 +1088,10 @@ fobj__make_err(const char *type, return bind_err(err); } +static void +fobjErr__fobjErr_marker_DONT_IMPLEMENT_ME(VSelf) { +} + static void fobjErr_fobjDispose(VSelf) { Self(fobjErr); @@ -1263,7 +1275,7 @@ fobjBase__kh(void) { return khandle; } -fobj_klass_handle(fobjErr, mth(fobjRepr), varsized(kv)); +fobj_klass_handle(fobjErr, mth(fobjRepr, _fobjErr_marker_DONT_IMPLEMENT_ME), varsized(kv)); fobj_klass_handle(fobjStr, mth(fobjDispose), varsized(_buf)); fobj_klass_handle(fobjInt); fobj_klass_handle(fobjUInt); @@ -1298,8 +1310,10 @@ fobj_init(void) { FOBJ_FUNC_ARP(); - falseStr = $ref($S("false")); - trueStr = $ref($S("true")); + fobjTrue = $alloc(fobjBool, .b = true); + fobjFalse = $alloc(fobjBool, .b = false); + falseRepr = $ref($S("$B(false)")); + trueRepr = $ref($S("$B(true)")); } void diff --git a/src/fu_util/impl/fo_impl.h b/src/fu_util/impl/fo_impl.h index 36597320c..9ef28dde7 100644 --- a/src/fu_util/impl/fo_impl.h +++ b/src/fu_util/impl/fo_impl.h @@ -254,7 +254,7 @@ typedef struct { #define fobj__special_void_method(meth) \ \ - static ft_unused fobj_method_handle_t fobj__nm_mhandle(meth) (void) { \ + ft_inline ft_gcc_const fobj_method_handle_t fobj__nm_mhandle(meth) (void) { \ static volatile fobj_method_handle_t hndl = 0; \ fobj_method_handle_t h = hndl; \ if (h) return h; \ diff --git a/src/fu_util/impl/fo_impl2.h b/src/fu_util/impl/fo_impl2.h index 093260ae0..1d42acc18 100644 --- a/src/fu_util/impl/fo_impl2.h +++ b/src/fu_util/impl/fo_impl2.h @@ -84,11 +84,6 @@ fobj_float(double f) { return $alloc(fobjFloat, .f = f); } -ft_inline 
fobjBool* -fobj_bool(bool b) { - return $alloc(fobjBool, .b = b); -} - typedef struct fobjErr fobjErr; struct fobjErr { const char* type; @@ -117,22 +112,24 @@ struct fobjErr { kvs, ft_arrsz(kvs)); \ }) -#define fobj_make_syserr( ...) \ - fm_cat(fobj_make_syserr_, fm_va_01(__VA_ARGS__))(__VA_ARGS__) -#define fobj_make_syserr_0(...) ({ \ +#define fobj_make_syserr(erno_, ...) \ + fm_cat(fobj_make_syserr_, fm_va_01(__VA_ARGS__))((erno_), fm_uniq(erno), __VA_ARGS__) +#define fobj_make_syserr_0(erno_, erno, ...) ({ \ + int erno = erno_; \ fobj_err_kv_t kvs[] = { \ - {"errNo", ft_mka_i(errno)}, \ - {"errStr", ft_mka_s((char*)ft_strerror(errno))}, \ + {"errNo", ft_mka_i(erno)}, \ + {"errNoStr", ft_mka_s((char*)ft_strerror(erno))}, \ }; \ fobj__make_err(fobj_error_kind_SysErr(), \ - ft__srcpos(), "System Error: {errStr}", \ + ft__srcpos(), "System Error: {errNoStr}", \ kvs, ft_arrsz(kvs));\ }) -#define fobj_make_syserr_1(msg, ...) ({ \ +#define fobj_make_syserr_1(erno_, erno, msg, ...) ({ \ + int erno = erno_; \ fobj_err_kv_t kvs[] = { \ - {"errNo", ft_mka_i(errno)}, \ - {"errStr", ft_mka_s((char*)ft_strerror(errno))}, \ - {"__msgSuffix", ft_mka_s((char*)": {errStr}")}, \ + {"errNo", ft_mka_i(erno)}, \ + {"errNoStr", ft_mka_s((char*)ft_strerror(erno))}, \ + {"__msgSuffix", ft_mka_s((char*)": {errNoStr}")}, \ fobj__err_transform_kv(__VA_ARGS__) \ }; \ fobj__make_err(fobj_error_kind_SysErr(), \ @@ -146,10 +143,10 @@ extern err_i fobj__make_err(const char *type, fobj_err_kv_t *kvs, size_t kvn); -#define fobj__err_transform_kv_do(key, ...) \ - fobj__err_mkkv_##key(__VA_ARGS__) +#define fobj__err_transform_kv_do(v) \ + fobj__err_mkkv_##v #define fobj__err_transform_kv(...) \ - fm_eval_tuples_comma(fobj__err_transform_kv_do, __VA_ARGS__) + fm_eval_foreach_comma(fobj__err_transform_kv_do, __VA_ARGS__) #define fobj__err_getkey(key, err, ...) 
\ fobj__err_getkv_##key(err, fm_or_default(__VA_ARGS__)(NULL)) @@ -161,7 +158,7 @@ getErrno(err_i err) { ft_inline const char* getErrnoStr(err_i err) { - return $errkey(errStr, err); + return $errkey(errNoStr, err); } ft_inline const char* diff --git a/src/utils/file.c b/src/utils/file.c index 7eb2c4329..dd9e9f0d8 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3544,11 +3544,10 @@ common_pioExists(fobj_t self, path_t path, err_i *err) return false; } if ($noerr(*err) && !S_ISREG(buf.st_mode)) - *err = $err(SysErr, "File {path:q} is not regular", (path, path)); + *err = $err(SysErr, "File {path:q} is not regular", path(path)); if ($haserr(*err)) { - *err = $err(SysErr, "Could not check file existance: {cause:$M}", - (cause, (*err).self), (errNo, getErrno(*err)), - (errStr, getErrnoStr(*err))); + *err = $syserr(getErrno(*err), "Could not check file existance: {cause:$M}", + cause((*err).self)); } return $noerr(*err); } @@ -3569,7 +3568,7 @@ pioLocalDrive_pioOpen(VSelf, path_t path, int flags, fd = open(path, flags, permissions); if (fd < 0) { - *err = $syserr("Cannot open file {path:q}", (path, path)); + *err = $syserr(errno, "Cannot open file {path:q}", path(path)); return (pioFile_i){NULL}; } @@ -3587,7 +3586,7 @@ pioLocalDrive_pioStat(VSelf, path_t path, bool follow_symlink, err_i *err) r = follow_symlink ? 
stat(path, &st) : lstat(path, &st); if (r < 0) - *err = $syserr("Cannot stat file {path:q}", (path, path)); + *err = $syserr(errno, "Cannot stat file {path:q}", path(path)); return st; } @@ -3599,7 +3598,7 @@ pioLocalDrive_pioRemove(VSelf, path_t path, bool missing_ok) if (remove_file_or_dir(path) != 0) { if (!missing_ok || errno != ENOENT) - return $syserr("Cannot remove {path:q}", (path, path)); + return $syserr(errno, "Cannot remove {path:q}", path(path)); } return $noerr(); } @@ -3608,8 +3607,8 @@ static err_i pioLocalDrive_pioRename(VSelf, path_t old_path, path_t new_path) { if (rename(old_path, new_path) != 0) - return $syserr("Cannot rename file {old_path:q} to {new_path:q}", - (old_path, old_path), (new_path, new_path)); + return $syserr(errno, "Cannot rename file {old_path:q} to {new_path:q}", + old_path(old_path), new_path(new_path)); return $noerr(); } @@ -3646,11 +3645,13 @@ pioLocalFile_pioClose(VSelf, bool sync) { r = fsync(self->fd); if (r < 0) - err = $syserr("Cannot fsync file {path:q}", (path, self->p.path)); + err = $syserr(errno, "Cannot fsync file {path:q}", + path(self->p.path)); } r = close(self->fd); if (r < 0 && $isNULL(err)) - err = $syserr("Cannot close file {path:q}", (path, self->p.path)); + err = $syserr(errno, "Cannot close file {path:q}", + path(self->p.path)); self->fd = -1; self->p.closed = true; return err; @@ -3668,7 +3669,8 @@ pioLocalFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) r = read(self->fd, buf.ptr, buf.len); if (r < 0) { - *err = $syserr("Cannot read from {path:q}", (path, self->p.path)); + *err = $syserr(errno, "Cannot read from {path:q}", + path(self->p.path)); return 0; } return r; @@ -3689,13 +3691,14 @@ pioLocalFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) r = durable_write(self->fd, buf.ptr, buf.len); if (r < 0) { - *err = $syserr("Cannot write to file {path:q}", (path, self->p.path)); + *err = $syserr(errno, "Cannot write to file {path:q}", + path(self->p.path)); return 0; } if (r < buf.len) { *err = 
$err(SysErr, "Short write on {path:q}: {writtenSz} < {wantedSz}", - (path, self->p.path), (writtenSz, r), (wantedSz, buf.len)); + path(self->p.path), writtenSz(r), wantedSz(buf.len)); } return r; } @@ -3716,7 +3719,8 @@ pioLocalFile_pioTruncate(VSelf, size_t sz) ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); if (ftruncate(self->fd, sz) < 0) - return $syserr("Cannot truncate file {path:q}", (path, self->p.path)); + return $syserr(errno, "Cannot truncate file {path:q}", + path(self->p.path)); return $noerr(); } @@ -3761,8 +3765,8 @@ pioRemoteDrive_pioOpen(VSelf, path_t path, if (hdr.arg != 0) { - errno = (int)hdr.arg; - *err = $syserr("Cannot open remote file {path:q}", (path, path)); + *err = $syserr((int)hdr.arg, "Cannot open remote file {path:q}", + path(path)); fio_fdset &= ~(1 << hdr.handle); return (pioFile_i){NULL}; } @@ -3792,8 +3796,8 @@ pioRemoteDrive_pioStat(VSelf, path_t path, bool follow_symlink, err_i *err) if (hdr.arg != 0) { - errno = (int)hdr.arg; - *err = $syserr("Cannot stat remote file {path:q}", (path, path)); + *err = $syserr((int)hdr.arg, "Cannot stat remote file {path:q}", + path(path)); } return st; } @@ -3818,8 +3822,8 @@ pioRemoteDrive_pioRemove(VSelf, path_t path, bool missing_ok) if (hdr.arg != 0) { - errno = (int)hdr.arg; - return $syserr("Cannot remove remote file {path:q}", (path, path)); + return $syserr((int)hdr.arg, "Cannot remove remote file {path:q}", + path(path)); } return $noerr(); } @@ -3845,9 +3849,8 @@ pioRemoteDrive_pioRename(VSelf, path_t old_path, path_t new_path) if (hdr.arg != 0) { - errno = (int)hdr.arg; - return $syserr("Cannot rename remote file {old_path:q} to {new_path:q}", - (old_path, old_path), (new_path, new_path)); + return $syserr((int)hdr.arg, "Cannot rename remote file {old_path:q} to {new_path:q}", + old_path(old_path), new_path(new_path)); } return $noerr(); } @@ -3901,8 +3904,8 @@ pioRemoteFile_pioSync(VSelf) if (hdr.arg != 0) { - errno = (int)hdr.arg; - return $syserr("Cannot fsync 
remote file {path:q}", (path, self->p.path)); + return $syserr((int)hdr.arg, "Cannot fsync remote file {path:q}", + path(self->p.path)); } return $noerr(); } @@ -3935,8 +3938,8 @@ pioRemoteFile_pioClose(VSelf, bool sync) if (hdr.arg != 0 && $isNULL(err)) { - errno = (int)hdr.arg; - err = $syserr("Cannot close remote file {path:q}", (path, self->p.path)); + err = $syserr((int)hdr.arg, "Cannot close remote file {path:q}", + path(self->p.path)); } self->p.closed = true; @@ -4013,17 +4016,16 @@ pioRemoteFile_pioAsyncRead(VSelf, ft_bytes_t buf, err_i *err) { ft_assert(hdr.size < CHUNK_SIZE); IO_CHECK(fio_read_all(fio_stdin, self->asyncChunk, hdr.size), hdr.size); - errno = erno; ft_assert(((char*)self->asyncChunk)[hdr.size] == 0); - *err = $syserr("Cannot async read remote file {path:q}: {remotemsg}", - (remotemsg, self->asyncChunk), - (path, self->p.path)); + *err = $syserr(erno, "Cannot async read remote file {path:q}: {remotemsg}", + remotemsg(self->asyncChunk), + path(self->p.path)); break; } else { - errno = erno; - *err = $syserr("Cannot async read remote file {path:q}", (path, self->p.path)); + *err = $syserr(erno, "Cannot async read remote file {path:q}", + path(self->p.path)); } fio_disconnect(); /* discard possible pending data in pipe */ break; @@ -4078,8 +4080,8 @@ pioRemoteFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) ft_dbg_assert(hdr.cop == FIO_SEND); IO_CHECK(fio_read_all(fio_stdin, buf.ptr, hdr.size), hdr.size); if (hdr.arg != 0) { - errno = (int)hdr.arg; - *err = $syserr("Cannot read remote file {path:q}", (path, self->p.path)); + *err = $syserr((int)hdr.arg, "Cannot read remote file {path:q}", + path(self->p.path)); return 0; } @@ -4117,8 +4119,8 @@ pioRemoteFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) /* set errno */ if (hdr.arg != 0) { - errno = (int)hdr.arg; - *err = $syserr("Cannot write remote file {path:q}", (path, self->p.path)); + *err = $syserr((int)hdr.arg, "Cannot write remote file {path:q}", + path(self->p.path)); return 0; } @@ 
-4233,7 +4235,7 @@ pioRemoteFile_pioAsyncError(VSelf) errmsg = pgut_malloc(ERRMSG_MAX_LEN); IO_CHECK(fio_read_all(fio_stdin, errmsg, hdr.size), hdr.size); - self->asyncError = $err(SysErr, "{remotemsg}", (remotemsg, errmsg)); + self->asyncError = $err(SysErr, "{remotemsg}", remotemsg(errmsg)); self->didAsync = false; free(errmsg); return self->asyncError; @@ -4430,7 +4432,7 @@ pioWriteFilter_pioWrite(VSelf, ft_bytes_t rbuf, err_i *err) if (rbuf.len) { *err = $err(SysErr, "short write: {writtenSz} < {wantedSz}", - (writtenSz, rlen - rbuf.len), (wantedSz, rbuf.len)); + writtenSz(rlen - rbuf.len), wantedSz(rbuf.len)); } return rlen - rbuf.len; } @@ -4516,10 +4518,10 @@ newGZError(const char *gzmsg, int gzerrno) if (gzerrno == Z_OK && errno == 0) return $noerr(); if (gzerrno == Z_ERRNO) { - return $syserr("System error during GZ"); + return $syserr(errno, "System error during GZ"); } - return $err(GZ, "GZ error: {gzErrStr}", (gzErrStr, gzmsg), (gzErrNo, gzerrno)); + return $err(GZ, "GZ error: {gzErrStr}", gzErrStr(gzmsg), gzErrNo(gzerrno)); } pioFilter_i @@ -4800,8 +4802,8 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, $ireturn(err); $ireturn($err(SysErr, "Short write to destination file {path}: {writtenSz} < {wantedSz}", - (path, $irepr(dest)->ptr), - (wantedSz, read_len), (writtenSz, write_len))); + path($irepr(dest)->ptr), + wantedSz(read_len), writtenSz(write_len))); } } @@ -4809,7 +4811,7 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, err = $i(pioFlush, dest); if ($haserr(err)) $ireturn($err(SysErr, "Cannot flush file {path}: {cause}", - (path, $irepr(dest)->ptr), (cause, err.self))); + path($irepr(dest)->ptr), cause(err.self))); return $noerr(); } From e499bcdf8a567944cb59baaae33ce6e567dfd6c5 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 11 Jul 2022 06:33:25 +0300 Subject: [PATCH 029/339] file.c: return back close on dispose but in a gentle way --- src/utils/file.c | 80 ++++++++++++++++++++++++++++++++---------------- 1 file 
changed, 54 insertions(+), 26 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index dd9e9f0d8..53ab451f8 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3631,6 +3631,17 @@ pioLocalDrive_pioIsRemote(VSelf) } /* LOCAL FILE */ +static void +pioLocalFile_fobjDispose(VSelf) +{ + Self(pioLocalFile); + if (!self->p.closed) + { + close(self->fd); + self->fd = -1; + self->p.closed = true; + } +} static err_i pioLocalFile_pioClose(VSelf, bool sync) @@ -3911,40 +3922,49 @@ pioRemoteFile_pioSync(VSelf) } static err_i -pioRemoteFile_pioClose(VSelf, bool sync) +pioRemoteFile_doClose(VSelf) { - Self(pioRemoteFile); - err_i err = $noerr(); - fio_header hdr; + Self(pioRemoteFile); + err_i err = $noerr(); + fio_header hdr; - ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); + hdr = (fio_header){ + .cop = FIO_CLOSE, + .handle = self->handle, + .size = 0, + .arg = 0, + }; - if (sync && (self->p.flags & O_ACCMODE) != O_RDONLY) - err = pioRemoteFile_pioSync(self); + fio_fdset &= ~(1 << hdr.handle); + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - hdr = (fio_header){ - .cop = FIO_CLOSE, - .handle = self->handle, - .size = 0, - .arg = 0, - }; + /* Wait for response */ + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + ft_dbg_assert(hdr.cop == FIO_CLOSE); - fio_fdset &= ~(1 << hdr.handle); - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + if (hdr.arg != 0 && $isNULL(err)) + { + err = $syserr((int)hdr.arg, "Cannot close remote file {path:q}", + path(self->p.path)); + } - /* Wait for response */ - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - ft_dbg_assert(hdr.cop == FIO_CLOSE); + self->p.closed = true; - if (hdr.arg != 0 && $isNULL(err)) - { - err = $syserr((int)hdr.arg, "Cannot close remote file {path:q}", - path(self->p.path)); - } + return err; +} - self->p.closed = true; +static err_i +pioRemoteFile_pioClose(VSelf, bool sync) +{ + 
Self(pioRemoteFile); + err_i err = $noerr(); - return err; + ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); + + if (sync && (self->p.flags & O_ACCMODE) != O_RDONLY) + err = pioRemoteFile_pioSync(self); + + return fobj_err_combine(err, pioRemoteFile_doClose(self)); } static size_t @@ -4245,6 +4265,14 @@ static void pioRemoteFile_fobjDispose(VSelf) { Self(pioRemoteFile); + if (!self->p.closed) + { + err_i err; + + err = pioRemoteFile_doClose(self); + if ($haserr(err)) + elog(WARNING, "%s", $errmsg(err)); + } $idel(&self->asyncError); ft_free(self->asyncChunk); } @@ -4818,7 +4846,7 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, fobj_klass_handle(pioFile); fobj_klass_handle(pioLocalDrive); fobj_klass_handle(pioRemoteDrive); -fobj_klass_handle(pioLocalFile, inherits(pioFile), mth(fobjRepr)); +fobj_klass_handle(pioLocalFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioRemoteFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioWriteFilter, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioReadFilter, mth(fobjDispose, fobjRepr)); From 512488fd1a08152544b05eccfc0e9d1d16f7beb2 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 14 Jul 2022 18:27:28 +0300 Subject: [PATCH 030/339] fu_util: fix ft_strdup --- src/fu_util/impl/ft_impl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index 67d637e41..b37e386f8 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -321,7 +321,7 @@ ft_strdup(ft_str_t str) { if (str.ptr != NULL) memcpy(mem, str.ptr, str.len+1); else - mem[0] = '0'; + mem[0] = '\0'; str.ptr = mem; return str; } From 822111e278afb1318a9ae98cab954f971129606c Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Tue, 19 Jul 2022 14:58:51 +0300 Subject: [PATCH 031/339] [PBCKP-232] remove 9.5-9.6 support, part 1 --- .travis.yml | 1 + README.md | 72 +++++++-------- src/backup.c | 10 --- src/catchup.c | 8 -- src/parsexlog.c | 5 -- src/pg_probackup.c | 7 +- src/pg_probackup.h | 8 -- src/stream.c | 42 ++------- src/util.c | 42 +-------- src/utils/pgut.c | 18 +--- tests/backup.py | 149 ++++++++------------------------ tests/checkdb.py | 60 +------------ tests/helpers/ptrack_helpers.py | 61 ++++--------- 13 files changed, 100 insertions(+), 383 deletions(-) diff --git a/.travis.yml b/.travis.yml index 26b2bc4e2..ed932b68e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -52,6 +52,7 @@ env: jobs: allow_failures: - if: env(PG_BRANCH) = master + - if: env(PG_BRANCH) = REL9_6_STABLE - if: env(PG_BRANCH) = REL9_5_STABLE # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) diff --git a/README.md b/README.md index 5da8d199e..281116cce 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. The utility is compatible with: -* PostgreSQL 9.6, 10, 11, 12, 13, 14; +* PostgreSQL 10, 11, 12, 13, 14; As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. @@ -74,62 +74,62 @@ Installers are available in release **assets**. 
[Latests](https://github.com/pos #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{14,13,12,11,10} +sudo apt-get install pg-probackup-{14,13,12,11,10}-dbg #DEB-SRC Packages sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{14,13,12,11,10,9.6} +sudo apt-get source pg-probackup-{14,13,12,11,10} #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}{-dbg,} +sudo apt-get install pg-probackup-{14,13,12,11,10}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10} +yum install pg_probackup-{14,13,12,11,10}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10} +yum install pg_probackup-{14,13,12,11,10}-debuginfo #RPM Oracle Linux Packages rpm -ivh 
https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10} +yum install pg_probackup-{14,13,12,11,10}-debuginfo #SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{14,13,12,11,10,9.6} +yumdownloader --source pg_probackup-{14,13,12,11,10} #RPM SUSE|SLES Packages zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{14,13,12,11,10,9.6} -zypper install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +zypper --gpg-auto-import-keys install -y pg_probackup-{14,13,12,11,10} +zypper install pg_probackup-{14,13,12,11,10}-debuginfo #SRPM SUSE|SLES Packages -zypper si pg_probackup-{14,13,12,11,10,9.6} +zypper si pg_probackup-{14,13,12,11,10} #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p7 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10} +sudo apt-get install pg_probackup-{14,13,12,11,10}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10} +sudo apt-get install pg_probackup-{14,13,12,11,10}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get 
install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10} +sudo apt-get install pg_probackup-{14,13,12,11,10}-debuginfo ``` #### pg_probackup for PostgresPro Standard and Enterprise @@ -137,46 +137,46 @@ sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10}-dbg #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}{-dbg,} +sudo apt-get install pg-probackup-{std,ent}-{12,11,10}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{13,12,11,10} +yum install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-rhel.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum 
install pg_probackup-{std,ent}-{13,12,11,10} +yum install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-oraclelinux.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{13,12,11,10} +yum install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo ``` Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). 
diff --git a/src/backup.c b/src/backup.c index 84b503245..0edb57710 100644 --- a/src/backup.c +++ b/src/backup.c @@ -133,12 +133,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, pg_start_backup(label, smooth_checkpoint, ¤t, nodeInfo, backup_conn); /* Obtain current timeline */ -#if PG_VERSION_NUM >= 90600 current.tli = get_current_timeline(backup_conn); -#else - /* PG-9.5 */ - current.tli = get_current_timeline_from_control(FIO_DB_HOST, instance_config.pgdata, false); -#endif /* * In incremental backup mode ensure that already-validated @@ -1053,7 +1048,6 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, /* * Switch to a new WAL segment. It should be called only for master. - * For PG 9.5 it should be called only if pguser is superuser. */ void pg_switch_wal(PGconn *conn) @@ -1062,11 +1056,7 @@ pg_switch_wal(PGconn *conn) pg_silent_client_messages(conn); -#if PG_VERSION_NUM >= 100000 res = pgut_execute(conn, "SELECT pg_catalog.pg_switch_wal()", 0, NULL); -#else - res = pgut_execute(conn, "SELECT pg_catalog.pg_switch_xlog()", 0, NULL); -#endif PQclear(res); } diff --git a/src/catchup.c b/src/catchup.c index 385d8e9df..522279ac9 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -66,13 +66,7 @@ catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, cons source_node_info->is_ptrack_enabled = pg_is_ptrack_enabled(source_conn, source_node_info->ptrack_version_num); /* Obtain current timeline */ -#if PG_VERSION_NUM >= 90600 current.tli = get_current_timeline(source_conn); -#else - /* PG-9.5 */ - instance_config.pgdata = source_pgdata; - current.tli = get_current_timeline_from_control(FIO_DB_HOST, source_pgdata, false); -#endif elog(INFO, "Catchup start, pg_probackup version: %s, " "PostgreSQL version: %s, " @@ -1033,7 +1027,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (!dry_run) wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); -#if 
PG_VERSION_NUM >= 90600 /* Write backup_label */ Assert(stop_backup_result.backup_label_content != NULL); if (!dry_run) @@ -1061,7 +1054,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, stop_backup_result.tablespace_map_content = NULL; stop_backup_result.tablespace_map_content_len = 0; } -#endif /* wait for end of wal streaming and calculate wal size transfered */ if (!dry_run) diff --git a/src/parsexlog.c b/src/parsexlog.c index df9b96fb3..39fb64f0a 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -29,13 +29,8 @@ * RmgrNames is an array of resource manager names, to make error messages * a bit nicer. */ -#if PG_VERSION_NUM >= 100000 #define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask) \ name, -#else -#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup) \ - name, -#endif static const char *RmgrNames[RM_MAX_ID + 1] = { #include "access/rmgrlist.h" diff --git a/src/pg_probackup.c b/src/pg_probackup.c index ff5ab85d3..b0245f864 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -78,10 +78,8 @@ pid_t my_pid = 0; __thread int my_thread_num = 1; bool progress = false; bool no_sync = false; -#if PG_VERSION_NUM >= 100000 char *replication_slot = NULL; bool temp_slot = false; -#endif bool perm_slot = false; /* backup options */ @@ -205,9 +203,7 @@ static ConfigOption cmd_options[] = { 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT }, { 'b', 'C', "smooth-checkpoint", &smooth_checkpoint, SOURCE_CMD_STRICT }, { 's', 'S', "slot", &replication_slot, SOURCE_CMD_STRICT }, -#if PG_VERSION_NUM >= 100000 { 'b', 181, "temp-slot", &temp_slot, SOURCE_CMD_STRICT }, -#endif { 'b', 'P', "perm-slot", &perm_slot, SOURCE_CMD_STRICT }, { 'b', 182, "delete-wal", &delete_wal, SOURCE_CMD_STRICT }, { 'b', 183, "delete-expired", &delete_expired, SOURCE_CMD_STRICT }, @@ -905,14 +901,13 @@ main(int argc, char *argv[]) wal_file_name, instanceState->instance_name, instance_config.system_identifier, system_id); } 
-#if PG_VERSION_NUM >= 100000 if (temp_slot && perm_slot) elog(ERROR, "You cannot specify \"--perm-slot\" option with the \"--temp-slot\" option"); /* if slot name was not provided for temp slot, use default slot name */ if (!replication_slot && temp_slot) replication_slot = DEFAULT_TEMP_SLOT_NAME; -#endif + if (!replication_slot && perm_slot) replication_slot = DEFAULT_PERMANENT_SLOT_NAME; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 6f6dcdff6..2439fc23b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -65,13 +65,8 @@ extern const char *PROGRAM_EMAIL; #define DATABASE_DIR "database" #define BACKUPS_DIR "backups" #define WAL_SUBDIR "wal" -#if PG_VERSION_NUM >= 100000 #define PG_XLOG_DIR "pg_wal" #define PG_LOG_DIR "log" -#else -#define PG_XLOG_DIR "pg_xlog" -#define PG_LOG_DIR "pg_log" -#endif #define PG_TBLSPC_DIR "pg_tblspc" #define PG_GLOBAL_DIR "global" #define BACKUP_CONTROL_FILE "backup.control" @@ -777,11 +772,8 @@ extern bool stream_wal; extern bool show_color; extern bool progress; extern bool is_archive_cmd; /* true for archive-{get,push} */ -/* In pre-10 'replication_slot' is defined in receivelog.h */ extern char *replication_slot; -#if PG_VERSION_NUM >= 100000 extern bool temp_slot; -#endif extern bool perm_slot; /* backup options */ diff --git a/src/stream.c b/src/stream.c index 1ee8dee37..b10eb7308 100644 --- a/src/stream.c +++ b/src/stream.c @@ -2,7 +2,7 @@ * * stream.c: pg_probackup specific code for WAL streaming * - * Portions Copyright (c) 2015-2020, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -174,10 +174,10 @@ checkpoint_timeout(PGconn *backup_conn) * CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, * bool is_temporary, bool is_physical, bool reserve_wal, * bool slot_exists_ok) - * PG 9.5-10 + * PG 10 * CreateReplicationSlot(PGconn *conn, const char *slot_name, const 
char *plugin, * bool is_physical, bool slot_exists_ok) - * NOTE: PG 9.6 and 10 support reserve_wal in + * NOTE: PG 10 support reserve_wal in * pg_catalog.pg_create_physical_replication_slot(slot_name name [, immediately_reserve boolean]) * and * CREATE_REPLICATION_SLOT slot_name { PHYSICAL [ RESERVE_WAL ] | LOGICAL output_plugin } @@ -194,7 +194,7 @@ CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *pl #elif PG_VERSION_NUM >= 110000 return CreateReplicationSlot(conn, slot_name, plugin, is_temporary, is_physical, /* reserve_wal = */ true, slot_exists_ok); -#elif PG_VERSION_NUM >= 100000 +#else /* * PG-10 doesn't support creating temp_slot by calling CreateReplicationSlot(), but * it will be created by setting StreamCtl.temp_slot later in StreamLog() @@ -203,10 +203,6 @@ CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *pl return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok); else return true; -#else - /* these parameters not supported in PG < 10 */ - Assert(!is_temporary); - return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok); #endif } @@ -229,13 +225,8 @@ StreamLog(void *arg) stream_stop_begin = 0; /* Create repslot */ -#if PG_VERSION_NUM >= 100000 if (temp_slot || perm_slot) if (!CreateReplicationSlot_compat(stream_arg->conn, replication_slot, NULL, temp_slot, true, false)) -#else - if (perm_slot) - if (!CreateReplicationSlot_compat(stream_arg->conn, replication_slot, NULL, false, true, false)) -#endif { interrupted = true; elog(ERROR, "Couldn't create physical replication slot %s", replication_slot); @@ -248,18 +239,13 @@ StreamLog(void *arg) elog(LOG, "started streaming WAL at %X/%X (timeline %u) using%s slot %s", (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos, stream_arg->starttli, -#if PG_VERSION_NUM >= 100000 temp_slot ? 
" temporary" : "", -#else - "", -#endif replication_slot); else elog(LOG, "started streaming WAL at %X/%X (timeline %u)", (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos, stream_arg->starttli); -#if PG_VERSION_NUM >= 90600 { StreamCtl ctl; @@ -274,7 +260,6 @@ StreamLog(void *arg) ctl.synchronous = false; ctl.mark_done = false; -#if PG_VERSION_NUM >= 100000 ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, // (instance_config.compress_alg == NONE_COMPRESS) ? 0 : instance_config.compress_level, @@ -284,13 +269,10 @@ StreamLog(void *arg) ctl.stop_socket = PGINVALID_SOCKET; ctl.do_sync = false; /* We sync all files at the end of backup */ // ctl.mark_done /* for future use in s3 */ -#if PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000 +#if PG_VERSION_NUM < 110000 /* StreamCtl.temp_slot used only for PG-10, in PG>10, temp_slots are created by calling CreateReplicationSlot() */ ctl.temp_slot = temp_slot; -#endif /* PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000 */ -#else /* PG_VERSION_NUM < 100000 */ - ctl.basedir = (char *) stream_arg->basedir; -#endif /* PG_VERSION_NUM >= 100000 */ +#endif /* PG_VERSION_NUM < 110000 */ if (ReceiveXlogStream(stream_arg->conn, &ctl) == false) { @@ -298,25 +280,13 @@ StreamLog(void *arg) elog(ERROR, "Problem in receivexlog"); } -#if PG_VERSION_NUM >= 100000 if (!ctl.walmethod->finish()) { interrupted = true; elog(ERROR, "Could not finish writing WAL files: %s", strerror(errno)); } -#endif /* PG_VERSION_NUM >= 100000 */ - } -#else /* PG_VERSION_NUM < 90600 */ - /* PG-9.5 */ - if (ReceiveXlogStream(stream_arg->conn, stream_arg->startpos, stream_arg->starttli, - NULL, (char *) stream_arg->basedir, stop_streaming, - standby_message_timeout, NULL, false, false) == false) - { - interrupted = true; - elog(ERROR, "Problem in receivexlog"); } -#endif /* PG_VERSION_NUM >= 90600 */ /* be paranoid and sort xlog_files_list, * so if stop_lsn segno is already in the list, diff --git a/src/util.c b/src/util.c 
index e89f5776b..28bdf283e 100644 --- a/src/util.c +++ b/src/util.c @@ -102,11 +102,7 @@ checkControlFile(ControlFileData *ControlFile) static void digestControlFile(ControlFileData *ControlFile, char *src, size_t size) { -#if PG_VERSION_NUM >= 100000 int ControlFileSize = PG_CONTROL_FILE_SIZE; -#else - int ControlFileSize = PG_CONTROL_SIZE; -#endif if (size != ControlFileSize) elog(ERROR, "unexpected control file size %d, expected %d", @@ -127,11 +123,7 @@ writeControlFile(fio_location location, const char *path, ControlFileData *Contr int fd; char *buffer = NULL; -#if PG_VERSION_NUM >= 100000 int ControlFileSize = PG_CONTROL_FILE_SIZE; -#else - int ControlFileSize = PG_CONTROL_SIZE; -#endif /* copy controlFileSize */ buffer = pg_malloc0(ControlFileSize); @@ -207,44 +199,25 @@ get_current_timeline_from_control(fio_location location, const char *pgdata_path } /* - * Get last check point record ptr from pg_tonrol. + * Get last check point record ptr from pg_control. */ XLogRecPtr get_checkpoint_location(PGconn *conn) { -#if PG_VERSION_NUM >= 90600 PGresult *res; uint32 lsn_hi; uint32 lsn_lo; XLogRecPtr lsn; -#if PG_VERSION_NUM >= 100000 res = pgut_execute(conn, "SELECT checkpoint_lsn FROM pg_catalog.pg_control_checkpoint()", 0, NULL); -#else - res = pgut_execute(conn, - "SELECT checkpoint_location FROM pg_catalog.pg_control_checkpoint()", - 0, NULL); -#endif XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo); PQclear(res); /* Calculate LSN */ lsn = ((uint64) lsn_hi) << 32 | lsn_lo; return lsn; -#else - /* PG-9.5 */ - char *buffer; - size_t size; - ControlFileData ControlFile; - - buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, &size, false); - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); - - return ControlFile.checkPoint; -#endif } uint64 @@ -267,7 +240,6 @@ get_system_identifier(fio_location location, const char *pgdata_path, bool safe) uint64 get_remote_system_identifier(PGconn *conn) { -#if PG_VERSION_NUM >= 
90600 PGresult *res; uint64 system_id_conn; char *val; @@ -284,18 +256,6 @@ get_remote_system_identifier(PGconn *conn) PQclear(res); return system_id_conn; -#else - /* PG-9.5 */ - char *buffer; - size_t size; - ControlFileData ControlFile; - - buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, &size, false); - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); - - return ControlFile.system_identifier; -#endif } uint32 diff --git a/src/utils/pgut.c b/src/utils/pgut.c index c220b807d..f1b8da0b2 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -3,7 +3,7 @@ * pgut.c * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2017-2021, Postgres Professional + * Portions Copyright (c) 2017-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -20,11 +20,7 @@ #include "common/string.h" #endif -#if PG_VERSION_NUM >= 100000 #include "common/connect.h" -#else -#include "fe_utils/connect.h" -#endif #include @@ -94,7 +90,7 @@ prompt_for_password(const char *username) snprintf(message, lengthof(message), "Password for user %s: ", username); password = simple_prompt(message , false); } -#elif PG_VERSION_NUM >= 100000 +#else password = (char *) pgut_malloc(sizeof(char) * 100 + 1); if (username == NULL) simple_prompt("Password: ", password, 100, false); @@ -104,17 +100,7 @@ prompt_for_password(const char *username) snprintf(message, lengthof(message), "Password for user %s: ", username); simple_prompt(message, password, 100, false); } -#else - if (username == NULL) - password = simple_prompt("Password: ", 100, false); - else - { - char message[256]; - snprintf(message, lengthof(message), "Password for user %s: ", username); - password = simple_prompt(message, 100, false); - } #endif - in_password = false; } diff --git a/tests/backup.py b/tests/backup.py index 20ac480e0..23836cdbe 100644 --- a/tests/backup.py +++ b/tests/backup.py 
@@ -1856,118 +1856,43 @@ def test_backup_with_least_privileges_role(self): "CREATE SCHEMA ptrack; " "CREATE EXTENSION ptrack WITH SCHEMA ptrack") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON 
FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE 
pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN 
REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: node.safe_psql( diff --git a/tests/checkdb.py b/tests/checkdb.py index 5b6dda250..71f81fd6c 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -640,66 +640,8 @@ def test_checkdb_with_least_privileges(self): "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC;") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog 
TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' # amcheck-next function - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 
'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' -# 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - ) # PG 10 - elif self.get_version(node) > 100000 and self.get_version(node) < 110000: + if self.get_version(node) < 110000: node.safe_psql( 'backupdb', 'CREATE ROLE backup WITH LOGIN; ' diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index e3036d9c4..0fa252739 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -417,52 +417,21 @@ def simple_bootstrap(self, node, role) -> None: 'postgres', 'CREATE ROLE {0} WITH LOGIN REPLICATION'.format(role)) - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_start_backup(text, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0};'.format(role)) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) - # >= 10 - else: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; 
' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) + # PG >= 10 + node.safe_psql( + 'postgres', + 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False): res = node.execute( From cdc0d1581b82209bebe79dd4c1cd18a75b95f232 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 19 Jul 2022 14:59:48 +0300 Subject: [PATCH 032/339] remove unused structs --- src/utils/file.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 53ab451f8..d492bf2c6 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3423,11 +3423,6 @@ fio_communicate(int in, int out) } // CLASSES -typedef struct pioError { - fobjErr p; /* parent */ - int 
_errno; -} pioError; - typedef struct pioLocalDrive { } pioLocalDrive; @@ -3481,11 +3476,6 @@ typedef struct pioWriteFilter { } pioWriteFilter; #ifdef HAVE_LIBZ -typedef struct pioGZError { - fobjErr p; /* parent */ - int _gzerrno; -} pioGZError; - typedef struct pioGZCompress { z_stream strm; bool finished; From ae275dccd35e2e865bce92f51e554331947cd030 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Wed, 20 Jul 2022 02:15:07 +0300 Subject: [PATCH 033/339] [PBCKP-232] remove 9.5-9.6 support, part 2 --- .travis.yml | 4 - src/backup.c | 172 ++++++-------------- src/catchup.c | 13 +- src/dir.c | 16 -- src/pg_probackup.h | 6 +- src/utils/file.c | 5 +- tests/archive.py | 169 ++++---------------- tests/auth_test.py | 30 +--- tests/backup.py | 272 ++++++++------------------------ tests/catchup.py | 41 +++-- tests/false_positive.py | 3 - tests/helpers/ptrack_helpers.py | 32 +--- tests/incr_restore.py | 10 -- tests/pgpro2068.py | 24 +-- tests/pgpro560.py | 40 ++--- tests/ptrack.py | 143 ++++------------- tests/replica.py | 85 +--------- tests/restore.py | 201 +++++++---------------- tests/retention.py | 13 -- tests/validate.py | 19 +-- 20 files changed, 295 insertions(+), 1003 deletions(-) diff --git a/.travis.yml b/.travis.yml index ed932b68e..9e48c9cab 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,8 +32,6 @@ env: - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup @@ -52,8 +50,6 @@ env: jobs: allow_failures: - if: env(PG_BRANCH) = master - - if: env(PG_BRANCH) = 
REL9_6_STABLE - - if: env(PG_BRANCH) = REL9_5_STABLE # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) # Only run CI for master branch commits to limit our travis usage diff --git a/src/backup.c b/src/backup.c index 0edb57710..449d8d09c 100644 --- a/src/backup.c +++ b/src/backup.c @@ -32,9 +32,6 @@ parray *backup_files_list = NULL; /* We need critical section for datapagemap_add() in case of using threads */ static pthread_mutex_t backup_pagemap_mutex = PTHREAD_MUTEX_INITIALIZER; -// TODO: move to PGnodeInfo -bool exclusive_backup = false; - /* Is pg_start_backup() was executed */ bool backup_in_progress = false; @@ -80,7 +77,7 @@ backup_stopbackup_callback(bool fatal, void *userdata) { elog(WARNING, "backup in progress, stop backup"); /* don't care about stop_lsn in case of error */ - pg_stop_backup_send(st->conn, st->server_version, current.from_replica, exclusive_backup, NULL); + pg_stop_backup_send(st->conn, st->server_version, current.from_replica, NULL); } } @@ -493,10 +490,10 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, /* Notify end of backup */ pg_stop_backup(instanceState, ¤t, backup_conn, nodeInfo); - /* In case of backup from replica >= 9.6 we must fix minRecPoint, + /* In case of backup from replica we must fix minRecPoint, * First we must find pg_control in backup_files_list. 
*/ - if (current.from_replica && !exclusive_backup) + if (current.from_replica) { pgFile *pg_control = NULL; @@ -781,11 +778,6 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, } } - if (current.from_replica && exclusive_backup) - /* Check master connection options */ - if (instance_config.master_conn_opt.pghost == NULL) - elog(ERROR, "Options for connection to master must be provided to perform backup from replica"); - /* add note to backup if requested */ if (set_backup_params && set_backup_params->note) add_note(¤t, set_backup_params->note); @@ -866,22 +858,12 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) elog(ERROR, "Unknown server version %d", nodeInfo->server_version); if (nodeInfo->server_version < 100000) - sprintf(nodeInfo->server_version_str, "%d.%d", - nodeInfo->server_version / 10000, - (nodeInfo->server_version / 100) % 100); - else - sprintf(nodeInfo->server_version_str, "%d", - nodeInfo->server_version / 10000); - - if (nodeInfo->server_version < 90500) elog(ERROR, "server version is %s, must be %s or higher", - nodeInfo->server_version_str, "9.5"); + nodeInfo->server_version_str, "10"); - if (current.from_replica && nodeInfo->server_version < 90600) - elog(ERROR, - "server version is %s, must be %s or higher for backup from replica", - nodeInfo->server_version_str, "9.6"); + sprintf(nodeInfo->server_version_str, "%d", + nodeInfo->server_version / 10000); if (nodeInfo->pgpro_support) res = pgut_execute(conn, "SELECT pg_catalog.pgpro_edition()", 0, NULL); @@ -922,9 +904,6 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) if (res) PQclear(res); - - /* Do exclusive backup only for PostgreSQL 9.5 */ - exclusive_backup = nodeInfo->server_version < 90600; } /* @@ -1006,16 +985,10 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, /* 2nd argument is 'fast'*/ params[1] = smooth ? 
"false" : "true"; - if (!exclusive_backup) - res = pgut_execute(conn, - "SELECT pg_catalog.pg_start_backup($1, $2, false)", - 2, - params); - else - res = pgut_execute(conn, - "SELECT pg_catalog.pg_start_backup($1, $2)", - 2, - params); + res = pgut_execute(conn, + "SELECT pg_catalog.pg_start_backup($1, $2, false)", + 2, + params); /* * Set flag that pg_start_backup() was called. If an error will happen it @@ -1034,14 +1007,10 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, PQclear(res); if ((!backup->stream || backup->backup_mode == BACKUP_MODE_DIFF_PAGE) && - !backup->from_replica && - !(nodeInfo->server_version < 90600 && - !nodeInfo->is_superuser)) + !backup->from_replica) /* * Switch to a new WAL segment. It is necessary to get archived WAL * segment, which includes start LSN of current backup. - * Don`t do this for replica backups and for PG 9.5 if pguser is not superuser - * (because in 9.5 only superuser can switch WAL) */ pg_switch_wal(conn); } @@ -1546,20 +1515,9 @@ pg_create_restore_point(PGconn *conn, time_t backup_start_time) } void -pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text) +pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, char **query_text) { static const char - stop_exlusive_backup_query[] = - /* - * Stop the non-exclusive backup. Besides stop_lsn it returns from - * pg_stop_backup(false) copy of the backup label and tablespace map - * so they can be written to disk by the caller. - * TODO, question: add NULLs as backup_label and tablespace_map? 
- */ - "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " pg_catalog.pg_stop_backup() as lsn", stop_backup_on_master_query[] = "SELECT" " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," @@ -1568,16 +1526,8 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " labelfile," " spcmapfile" " FROM pg_catalog.pg_stop_backup(false, false)", - stop_backup_on_master_before10_query[] = - "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " lsn," - " labelfile," - " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false)", /* - * In case of backup from replica >= 9.6 we do not trust minRecPoint + * In case of backup from replica we do not trust minRecPoint * and stop_backup LSN, so we use latest replayed LSN as STOP LSN. */ stop_backup_on_replica_query[] = @@ -1587,28 +1537,12 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " pg_catalog.pg_last_wal_replay_lsn()," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false, false)", - stop_backup_on_replica_before10_query[] = - "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " pg_catalog.pg_last_xlog_replay_location()," - " labelfile," - " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false)"; + " FROM pg_catalog.pg_stop_backup(false, false)"; const char * const stop_backup_query = - is_exclusive ? - stop_exlusive_backup_query : - server_version >= 100000 ? - (is_started_on_replica ? + is_started_on_replica ? stop_backup_on_replica_query : - stop_backup_on_master_query - ) : - (is_started_on_replica ? 
- stop_backup_on_replica_before10_query : - stop_backup_on_master_before10_query - ); + stop_backup_on_master_query; bool sent = false; /* Make proper timestamp format for parse_time(recovery_time) */ @@ -1641,7 +1575,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica */ void pg_stop_backup_consume(PGconn *conn, int server_version, - bool is_exclusive, uint32 timeout, const char *query_text, + uint32 timeout, const char *query_text, PGStopBackupResult *result) { PGresult *query_result; @@ -1743,28 +1677,18 @@ pg_stop_backup_consume(PGconn *conn, int server_version, /* get backup_label_content */ result->backup_label_content = NULL; // if (!PQgetisnull(query_result, 0, backup_label_colno)) - if (!is_exclusive) - { - result->backup_label_content_len = PQgetlength(query_result, 0, backup_label_colno); - if (result->backup_label_content_len > 0) - result->backup_label_content = pgut_strndup(PQgetvalue(query_result, 0, backup_label_colno), - result->backup_label_content_len); - } else { - result->backup_label_content_len = 0; - } + result->backup_label_content_len = PQgetlength(query_result, 0, backup_label_colno); + if (result->backup_label_content_len > 0) + result->backup_label_content = pgut_strndup(PQgetvalue(query_result, 0, backup_label_colno), + result->backup_label_content_len); /* get tablespace_map_content */ result->tablespace_map_content = NULL; // if (!PQgetisnull(query_result, 0, tablespace_map_colno)) - if (!is_exclusive) - { - result->tablespace_map_content_len = PQgetlength(query_result, 0, tablespace_map_colno); - if (result->tablespace_map_content_len > 0) - result->tablespace_map_content = pgut_strndup(PQgetvalue(query_result, 0, tablespace_map_colno), - result->tablespace_map_content_len); - } else { - result->tablespace_map_content_len = 0; - } + result->tablespace_map_content_len = PQgetlength(query_result, 0, tablespace_map_colno); + if (result->tablespace_map_content_len > 0) + result->tablespace_map_content = 
pgut_strndup(PQgetvalue(query_result, 0, tablespace_map_colno), + result->tablespace_map_content_len); } /* @@ -1832,21 +1756,18 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb /* Create restore point * Only if backup is from master. - * For PG 9.5 create restore point only if pguser is superuser. */ - if (!backup->from_replica && - !(nodeInfo->server_version < 90600 && - !nodeInfo->is_superuser)) //TODO: check correctness + if (!backup->from_replica) pg_create_restore_point(pg_startbackup_conn, backup->start_time); /* Execute pg_stop_backup using PostgreSQL connection */ - pg_stop_backup_send(pg_startbackup_conn, nodeInfo->server_version, backup->from_replica, exclusive_backup, &query_text); + pg_stop_backup_send(pg_startbackup_conn, nodeInfo->server_version, backup->from_replica, &query_text); /* * Wait for the result of pg_stop_backup(), but no longer than * archive_timeout seconds */ - pg_stop_backup_consume(pg_startbackup_conn, nodeInfo->server_version, exclusive_backup, timeout, query_text, &stop_backup_result); + pg_stop_backup_consume(pg_startbackup_conn, nodeInfo->server_version, timeout, query_text, &stop_backup_result); if (backup->stream) { @@ -1859,28 +1780,25 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb wait_wal_and_calculate_stop_lsn(xlog_path, stop_backup_result.lsn, backup); /* Write backup_label and tablespace_map */ - if (!exclusive_backup) + Assert(stop_backup_result.backup_label_content != NULL); + + /* Write backup_label */ + pg_stop_backup_write_file_helper(backup->database_dir, PG_BACKUP_LABEL_FILE, "backup label", + stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + backup_files_list); + free(stop_backup_result.backup_label_content); + stop_backup_result.backup_label_content = NULL; + stop_backup_result.backup_label_content_len = 0; + + /* Write tablespace_map */ + if (stop_backup_result.tablespace_map_content != NULL) { - 
Assert(stop_backup_result.backup_label_content != NULL); - - /* Write backup_label */ - pg_stop_backup_write_file_helper(backup->database_dir, PG_BACKUP_LABEL_FILE, "backup label", - stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + pg_stop_backup_write_file_helper(backup->database_dir, PG_TABLESPACE_MAP_FILE, "tablespace map", + stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, backup_files_list); - free(stop_backup_result.backup_label_content); - stop_backup_result.backup_label_content = NULL; - stop_backup_result.backup_label_content_len = 0; - - /* Write tablespace_map */ - if (stop_backup_result.tablespace_map_content != NULL) - { - pg_stop_backup_write_file_helper(backup->database_dir, PG_TABLESPACE_MAP_FILE, "tablespace map", - stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, - backup_files_list); - free(stop_backup_result.tablespace_map_content); - stop_backup_result.tablespace_map_content = NULL; - stop_backup_result.tablespace_map_content_len = 0; - } + free(stop_backup_result.tablespace_map_content); + stop_backup_result.tablespace_map_content = NULL; + stop_backup_result.tablespace_map_content_len = 0; } if (backup->stream) diff --git a/src/catchup.c b/src/catchup.c index 522279ac9..1195f7a7f 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -185,9 +185,6 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, elog(ERROR, "Ptrack is disabled"); } - if (current.from_replica && exclusive_backup) - elog(ERROR, "Catchup from standby is only available for PostgreSQL >= 9.6"); - /* check that we don't overwrite tablespace in source pgdata */ catchup_check_tablespaces_existance_in_tbsmapping(source_conn); @@ -1012,13 +1009,13 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pg_silent_client_messages(source_conn); /* Execute pg_stop_backup using PostgreSQL connection */ - 
pg_stop_backup_send(source_conn, source_node_info.server_version, current.from_replica, exclusive_backup, &stop_backup_query_text); + pg_stop_backup_send(source_conn, source_node_info.server_version, current.from_replica, &stop_backup_query_text); /* * Wait for the result of pg_stop_backup(), but no longer than * archive_timeout seconds */ - pg_stop_backup_consume(source_conn, source_node_info.server_version, exclusive_backup, timeout, stop_backup_query_text, &stop_backup_result); + pg_stop_backup_consume(source_conn, source_node_info.server_version, timeout, stop_backup_query_text, &stop_backup_result); /* Cleanup */ pg_free(stop_backup_query_text); @@ -1076,12 +1073,10 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, } /* - * In case of backup from replica >= 9.6 we must fix minRecPoint + * In case of backup from replica we must fix minRecPoint */ - if (current.from_replica && !exclusive_backup) - { + if (current.from_replica) set_min_recovery_point(source_pg_control_file, dest_pgdata, current.stop_lsn); - } /* close ssh session in main thread */ fio_disconnect(); diff --git a/src/dir.c b/src/dir.c index 3e5e28cef..5f25f2ee4 100644 --- a/src/dir.c +++ b/src/dir.c @@ -83,11 +83,7 @@ static char *pgdata_exclude_files[] = "probackup_recovery.conf", "recovery.signal", "standby.signal", - NULL -}; -static char *pgdata_exclude_files_non_exclusive[] = -{ /*skip in non-exclusive backup */ "backup_label", "tablespace_map", @@ -571,18 +567,6 @@ dir_check_file(pgFile *file, bool backup_logs) /* Check if we need to exclude file by name */ if (S_ISREG(file->mode)) { - if (!exclusive_backup) - { - for (i = 0; pgdata_exclude_files_non_exclusive[i]; i++) - if (strcmp(file->rel_path, - pgdata_exclude_files_non_exclusive[i]) == 0) - { - /* Skip */ - elog(VERBOSE, "Excluding file: %s", file->name); - return CHECK_FALSE; - } - } - for (i = 0; pgdata_exclude_files[i]; i++) if (strcmp(file->rel_path, pgdata_exclude_files[i]) == 0) { diff --git 
a/src/pg_probackup.h b/src/pg_probackup.h index 2439fc23b..eb051065b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -782,8 +782,6 @@ extern bool smooth_checkpoint; /* remote probackup options */ extern char* remote_agent; -extern bool exclusive_backup; - /* delete options */ extern bool delete_wal; extern bool delete_expired; @@ -1273,9 +1271,9 @@ extern void pg_start_backup(const char *label, bool smooth, pgBackup *backup, PGNodeInfo *nodeInfo, PGconn *conn); extern void pg_silent_client_messages(PGconn *conn); extern void pg_create_restore_point(PGconn *conn, time_t backup_start_time); -extern void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text); +extern void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, char **query_text); extern void pg_stop_backup_consume(PGconn *conn, int server_version, - bool is_exclusive, uint32 timeout, const char *query_text, + uint32 timeout, const char *query_text, PGStopBackupResult *result); extern void pg_stop_backup_write_file_helper(const char *path, const char *filename, const char *error_msg_filename, const void *data, size_t len, parray *file_list); diff --git a/src/utils/file.c b/src/utils/file.c index 53ab451f8..92bebc7c8 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -38,7 +38,6 @@ typedef struct bool follow_symlink; bool add_root; bool backup_logs; - bool exclusive_backup; bool skip_hidden; int external_dir_num; } fio_list_dir_request; @@ -2798,7 +2797,6 @@ fio_list_dir_internal(parray *files, const char *root, bool exclude, req.follow_symlink = follow_symlink; req.add_root = add_root; req.backup_logs = backup_logs; - req.exclusive_backup = exclusive_backup; req.skip_hidden = skip_hidden; req.external_dir_num = external_dir_num; @@ -2891,7 +2889,6 @@ fio_list_dir_impl(int out, char* buf) * TODO: correctly send elog messages from agent to main process. 
*/ instance_config.logger.log_level_console = ERROR; - exclusive_backup = req->exclusive_backup; dir_list_file(file_files, req->path, req->exclude, req->follow_symlink, req->add_root, req->backup_logs, req->skip_hidden, @@ -4863,4 +4860,4 @@ init_pio_objects(void) localDrive = bindref_pioDrive($alloc(pioLocalDrive)); remoteDrive = bindref_pioDrive($alloc(pioRemoteDrive)); -} \ No newline at end of file +} diff --git a/tests/archive.py b/tests/archive.py index 52fb225e8..fe3d89b17 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -84,11 +84,6 @@ def test_pgpro434_2(self): 'checkpoint_timeout': '30s'} ) - if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because pg_control_checkpoint() is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -264,15 +259,9 @@ def test_pgpro434_3(self): with open(log_file, 'r') as f: log_content = f.read() - # in PG =< 9.6 pg_stop_backup always wait - if self.get_version(node) < 100000: - self.assertIn( - "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", - log_content) - else: - self.assertIn( - "ERROR: WAL segment 000000010000000000000003 could not be archived in 60 seconds", - log_content) + self.assertIn( + "ERROR: WAL segment 000000010000000000000003 could not be archived in 60 seconds", + log_content) log_file = os.path.join(node.logs_dir, 'postgresql.log') with open(log_file, 'r') as f: @@ -418,12 +407,8 @@ def test_archive_push_file_exists(self): self.assertNotIn( 'pg_probackup archive-push completed successfully', log_content) - if self.get_version(node) < 100000: - wal_src = os.path.join( - node.data_dir, 'pg_xlog', '000000010000000000000001') - else: - wal_src = os.path.join( - node.data_dir, 'pg_wal', '000000010000000000000001') + wal_src = os.path.join( + node.data_dir, 'pg_wal', '000000010000000000000001') if 
self.archive_compress: with open(wal_src, 'rb') as f_in, gzip.open( @@ -555,16 +540,10 @@ def test_archive_push_partial_file_exists(self): "postgres", "INSERT INTO t1 VALUES (1) RETURNING (xmin)").decode('utf-8').rstrip() - if self.get_version(node) < 100000: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() - else: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() filename_orig = filename_orig.decode('utf-8') @@ -634,16 +613,10 @@ def test_archive_push_part_file_exists_not_stale(self): "postgres", "create table t2()") - if self.get_version(node) < 100000: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() - else: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() filename_orig = filename_orig.decode('utf-8') @@ -708,11 +681,6 @@ def test_replica_archive(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) # ADD INSTANCE 'MASTER' self.add_instance(backup_dir, 'master', master) @@ -839,11 +807,6 @@ def test_master_and_replica_parallel_archiving(self): 'archive_timeout': '10s'} ) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 
'Skipped because backup from replica is not supported in PG 9.5') - replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) replica.cleanup() @@ -921,9 +884,6 @@ def test_basic_master_and_replica_concurrent_archiving(self): set replica with archiving, make sure that archiving on both node is working. """ - if self.pg_config_version < self.version_to_num('9.6.0'): - return unittest.skip('You need PostgreSQL >= 9.6 for this test') - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -934,11 +894,6 @@ def test_basic_master_and_replica_concurrent_archiving(self): 'checkpoint_timeout': '30s', 'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) replica.cleanup() @@ -1115,10 +1070,7 @@ def test_archive_pg_receivexlog(self): self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if self.get_version(node) < 100000: - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') pg_receivexlog = self.run_binary( [ @@ -1188,11 +1140,8 @@ def test_archive_pg_receivexlog_compression_pg10(self): self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') pg_receivexlog = self.run_binary( [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', @@ -1269,11 +1218,6 @@ def 
test_archive_catalog(self): 'archive_timeout': '30s', 'checkpoint_timeout': '30s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -1930,10 +1874,6 @@ def test_waldir_outside_pgdata_archiving(self): """ check that archive-push works correct with symlinked waldir """ - if self.pg_config_version < self.version_to_num('10.0'): - return unittest.skip( - 'Skipped because waldir outside pgdata is supported since PG 10') - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') external_wal_dir = os.path.join(self.tmp_path, module_name, fname, 'ext_wal_dir') @@ -2041,10 +1981,7 @@ def test_archiving_and_slots(self): self.set_archiving(backup_dir, 'node', node, log_level='verbose') node.slow_start() - if self.get_version(node) < 100000: - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') # "pg_receivewal --create-slot --slot archive_slot --if-not-exists " # "&& pg_receivewal --synchronous -Z 1 /tmp/wal --slot archive_slot --no-loop" @@ -2167,22 +2104,13 @@ def test_archive_pg_receivexlog_partial_handling(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if self.get_version(node) < 100000: - app_name = 'pg_receivexlog' - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - app_name = 'pg_receivewal' - pg_receivexlog_path 
= self.get_bin_path('pg_receivewal') + app_name = 'pg_receivewal' + pg_receivexlog_path = self.get_bin_path('pg_receivewal') cmdline = [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', @@ -2376,11 +2304,6 @@ def test_archive_get_batching_sanity(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2600,16 +2523,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t1()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') @@ -2624,16 +2541,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t2()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') @@ -2648,16 +2559,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t3()") - if self.get_version(node) < 100000: - filename = node.safe_psql( 
- "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') @@ -2672,16 +2577,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t4()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') diff --git a/tests/auth_test.py b/tests/auth_test.py index 78af21be9..16c73308f 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -62,14 +62,9 @@ def test_backup_via_unprivileged_user(self): "GRANT EXECUTE ON FUNCTION" " pg_start_backup(text, boolean, boolean) TO backup;") - if self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup") - else: - node.safe_psql( - 'postgres', - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup") + node.safe_psql( + 'postgres', + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup") try: self.backup_node( @@ -103,19 +98,10 @@ def test_backup_via_unprivileged_user(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - if self.get_version(node) < self.version_to_num('10.0'): - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup") - else: - 
node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION " - "pg_stop_backup(boolean, boolean) TO backup") - # Do this for ptrack backups - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup") + node.safe_psql( + "postgres", + "GRANT EXECUTE ON FUNCTION " + "pg_stop_backup(boolean, boolean) TO backup") self.backup_node( backup_dir, 'node', node, options=['-U', 'backup']) @@ -184,8 +170,6 @@ def setUpClass(cls): "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " diff --git a/tests/backup.py b/tests/backup.py index 23836cdbe..685436291 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1427,9 +1427,6 @@ def test_basic_temp_slot_for_stream_backup(self): initdb_params=['--data-checksums'], pg_options={'max_wal_size': '40MB'}) - if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2167,62 +2164,24 @@ def test_backup_with_less_privileges_role(self): 'backupdb', 'CREATE EXTENSION ptrack') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - 
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "BEGIN; " + "CREATE ROLE backup WITH LOGIN; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " + "COMMIT;" + ) # enable STREAM backup node.safe_psql( @@ -2262,10 +2221,6 @@ def test_backup_with_less_privileges_role(self): backup_dir, 'node', node, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) - if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return - # Restore as replica replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) @@ -2952,71 +2907,28 @@ def test_missing_replication_permission(self): 'postgres', 'CREATE DATABASE backupdb') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', 
- "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " 
- "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # >= 10 - else: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT 
CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if ProbackupTest.enterprise: node.safe_psql( @@ -3083,73 +2995,28 @@ def test_missing_replication_permission_1(self): 'postgres', 'CREATE DATABASE backupdb') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON 
FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) 
TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if ProbackupTest.enterprise: node.safe_psql( @@ -3305,18 +3172,9 @@ def test_pg_stop_backup_missing_permissions(self): self.simple_bootstrap(node, 'backup') - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() FROM backup') - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup') - else: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') # Full backup in streaming mode try: diff --git a/tests/catchup.py b/tests/catchup.py index a83755c54..ac243da72 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1231,27 +1231,26 @@ def test_catchup_with_replication_slot(self): ).decode('utf-8').rstrip() self.assertEqual(slot_name, 'pg_probackup_perm_slot', 'Slot name mismatch') - # 5. 
--perm-slot --temp-slot (PG>=10) - if self.get_version(src_pg) >= self.version_to_num('10.0'): - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_5')) - try: - self.catchup_node( - backup_mode = 'FULL', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = [ - '-d', 'postgres', '-p', str(src_pg.port), '--stream', - '--perm-slot', - '--temp-slot' - ] - ) - self.assertEqual(1, 0, "Expecting Error because conflicting options --perm-slot and --temp-slot used together\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + # 5. --perm-slot --temp-slot + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_5')) + try: + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--perm-slot', + '--temp-slot' + ] + ) + self.assertEqual(1, 0, "Expecting Error because conflicting options --perm-slot and --temp-slot used together\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) #self.assertEqual(1, 0, 'Stop test') self.del_test_dir(module_name, self.fname) diff --git a/tests/false_positive.py b/tests/false_positive.py index a101f8107..9cff54185 100644 --- a/tests/false_positive.py +++ b/tests/false_positive.py @@ -113,9 +113,6 @@ def test_pg_10_waldir(self): """ test group access for PG >= 11 """ - if self.pg_config_version < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this 
test') - fname = self.id().split('.')[3] wal_dir = os.path.join( os.path.join(self.tmp_path, module_name, fname), 'wal_dir') diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 0fa252739..8e24dd279 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -547,13 +547,7 @@ def get_md5_per_page_for_fork(self, file, size_in_pages): def get_ptrack_bits_per_page_for_fork(self, node, file, size=[]): - if self.get_pgpro_edition(node) == 'enterprise': - if self.get_version(node) < self.version_to_num('10.0'): - header_size = 48 - else: - header_size = 24 - else: - header_size = 24 + header_size = 24 ptrack_bits_for_fork = [] # TODO: use macro instead of hard coded 8KB @@ -1517,25 +1511,15 @@ def version_to_num(self, version): def switch_wal_segment(self, node): """ - Execute pg_switch_wal/xlog() in given node + Execute pg_switch_wal() in given node Args: node: an instance of PostgresNode or NodeConnection class """ if isinstance(node, testgres.PostgresNode): - if self.version_to_num( - node.safe_psql('postgres', 'show server_version').decode('utf-8') - ) >= self.version_to_num('10.0'): - node.safe_psql('postgres', 'select pg_switch_wal()') - else: - node.safe_psql('postgres', 'select pg_switch_xlog()') + node.safe_psql('postgres', 'select pg_switch_wal()') else: - if self.version_to_num( - node.execute('show server_version')[0][0] - ) >= self.version_to_num('10.0'): - node.execute('select pg_switch_wal()') - else: - node.execute('select pg_switch_xlog()') + node.execute('select pg_switch_wal()') sleep(1) @@ -1545,12 +1529,8 @@ def wait_until_replica_catch_with_master(self, master, replica): 'postgres', 'show server_version').decode('utf-8').rstrip() - if self.version_to_num(version) >= self.version_to_num('10.0'): - master_function = 'pg_catalog.pg_current_wal_lsn()' - replica_function = 'pg_catalog.pg_last_wal_replay_lsn()' - else: - master_function = 'pg_catalog.pg_current_xlog_location()' - replica_function = 
'pg_catalog.pg_last_xlog_replay_location()' + master_function = 'pg_catalog.pg_current_wal_lsn()' + replica_function = 'pg_catalog.pg_last_wal_replay_lsn()' lsn = master.safe_psql( 'postgres', diff --git a/tests/incr_restore.py b/tests/incr_restore.py index cb684a23a..b3a2ce4a6 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -1492,11 +1492,6 @@ def test_make_replica_via_incr_checksum_restore(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master, replica=True) @@ -1565,11 +1560,6 @@ def test_make_replica_via_incr_lsn_restore(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master, replica=True) diff --git a/tests/pgpro2068.py b/tests/pgpro2068.py index 3baa0ba0b..454cac532 100644 --- a/tests/pgpro2068.py +++ b/tests/pgpro2068.py @@ -136,29 +136,7 @@ def test_minrecpoint_on_replica(self): recovery_config, "recovery_target_action = 'pause'") replica.slow_start(replica=True) - if self.get_version(node) < 100000: - script = ''' -DO -$$ -relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") -current_xlog_lsn = plpy.execute("SELECT min_recovery_end_location as lsn FROM pg_control_recovery()")[0]['lsn'] -plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) -found_corruption = False -for relation in relations: - pages_from_future = 
plpy.execute("with number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn)) - - if pages_from_future.nrows() == 0: - continue - - for page in pages_from_future: - plpy.notice('Found page from future. OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn'])) - found_corruption = True -if found_corruption: - plpy.error('Found Corruption') -$$ LANGUAGE plpython3u; -''' - else: - script = ''' + script = ''' DO $$ relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") diff --git a/tests/pgpro560.py b/tests/pgpro560.py index 53c7914a2..ffda7b5ee 100644 --- a/tests/pgpro560.py +++ b/tests/pgpro560.py @@ -84,20 +84,12 @@ def test_pgpro560_systemid_mismatch(self): "Expecting Error because of SYSTEM ID mismatch.\n " "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) except ProbackupException as e: - if self.get_version(node1) > 90600: - self.assertTrue( - 'ERROR: Backup data directory was ' - 'initialized for system id' in e.message and - 'but connected instance system id is' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - else: - self.assertIn( - 'ERROR: System identifier mismatch. 
' - 'Connected PostgreSQL instance has system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.assertTrue( + 'ERROR: Backup data directory was ' + 'initialized for system id' in e.message and + 'but connected instance system id is' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) sleep(1) @@ -111,20 +103,12 @@ def test_pgpro560_systemid_mismatch(self): "Expecting Error because of of SYSTEM ID mismatch.\n " "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) except ProbackupException as e: - if self.get_version(node1) > 90600: - self.assertTrue( - 'ERROR: Backup data directory was initialized ' - 'for system id' in e.message and - 'but connected instance system id is' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - else: - self.assertIn( - 'ERROR: System identifier mismatch. ' - 'Connected PostgreSQL instance has system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.assertTrue( + 'ERROR: Backup data directory was initialized ' + 'for system id' in e.message and + 'but connected instance system id is' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) # Clean after yourself self.del_test_dir(module_name, fname) diff --git a/tests/ptrack.py b/tests/ptrack.py index 08ea90f8d..19df9ff16 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -511,114 +511,41 @@ def test_ptrack_unprivileged(self): "postgres", "CREATE DATABASE backupdb") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM 
PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON 
ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON 
FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) node.safe_psql( "backupdb", diff --git a/tests/replica.py b/tests/replica.py index acf655aac..4bcfa6083 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -28,11 +28,6 @@ def test_replica_switchover(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node1', node1) @@ -105,10 +100,6 @@ def test_replica_stream_ptrack_backup(self): if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - if self.pg_config_version > self.version_to_num('9.6.0'): - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -239,11 +230,6 @@ def test_replica_archive_page_backup(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -381,11 +367,6 @@ def 
test_basic_make_replica_via_restore(self): pg_options={ 'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -439,11 +420,6 @@ def test_take_backup_from_delayed_replica(self): initdb_params=['--data-checksums'], pg_options={'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -552,11 +528,6 @@ def test_replica_promote(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -643,11 +614,6 @@ def test_replica_stop_lsn_null_offset(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master) @@ -728,11 +694,6 @@ def test_replica_stop_lsn_null_offset_next_record(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because 
backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -830,11 +791,6 @@ def test_archive_replica_null_offset(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master) @@ -914,11 +870,6 @@ def test_archive_replica_not_null_offset(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master) @@ -1003,11 +954,6 @@ def test_replica_toast(self): 'wal_level': 'replica', 'shared_buffers': '128MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -1105,11 +1051,6 @@ def test_start_stop_lsn_in_the_same_segno(self): 'wal_level': 'replica', 'shared_buffers': '128MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -1183,11 +1124,6 @@ def test_replica_promote_1(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if 
self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) # set replica True, so archive_mode 'always' is used. @@ -1310,11 +1246,6 @@ def test_replica_promote_archive_delta(self): 'checkpoint_timeout': '30s', 'archive_timeout': '30s'}) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) self.set_config( @@ -1435,11 +1366,6 @@ def test_replica_promote_archive_page(self): 'checkpoint_timeout': '30s', 'archive_timeout': '30s'}) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) self.set_archiving(backup_dir, 'node', node1) @@ -1557,11 +1483,6 @@ def test_parent_choosing(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) @@ -1708,11 +1629,7 @@ def test_replica_via_basebackup(self): # restore stream backup self.restore_node(backup_dir, 'node', node) - xlog_dir = 'pg_wal' - if self.get_version(node) < 100000: - xlog_dir = 'pg_xlog' - - filepath = os.path.join(node.data_dir, xlog_dir, "00000002.history") + filepath = os.path.join(node.data_dir, 'pg_wal', "00000002.history") self.assertTrue( os.path.exists(filepath), "History file do not exists: {0}".format(filepath)) diff 
--git a/tests/restore.py b/tests/restore.py index 5a00bc23b..9c300d232 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -361,10 +361,6 @@ def test_restore_to_lsn_inclusive(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(module_name, fname) - return - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -432,10 +428,6 @@ def test_restore_to_lsn_not_inclusive(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(module_name, fname) - return - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2146,10 +2138,7 @@ def test_restore_target_new_options(self): with node.connect("postgres") as con: con.execute("INSERT INTO tbl0005 VALUES (1)") con.commit() - if self.get_version(node) > self.version_to_num('10.0'): - res = con.execute("SELECT pg_current_wal_lsn()") - else: - res = con.execute("SELECT pg_current_xlog_location()") + res = con.execute("SELECT pg_current_wal_lsn()") con.commit() con.execute("INSERT INTO tbl0005 VALUES (2)") @@ -2240,33 +2229,32 @@ def test_restore_target_new_options(self): node.slow_start() # Restore with recovery target lsn - if self.get_version(node) >= 100000: - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target-lsn={0}'.format(target_lsn), - "--recovery-target-action=promote", - '--recovery-target-timeline=1', - ]) + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target-lsn={0}'.format(target_lsn), + "--recovery-target-action=promote", + '--recovery-target-timeline=1', + ]) - with open(recovery_conf, 'r') as f: - 
recovery_conf_content = f.read() + with open(recovery_conf, 'r') as f: + recovery_conf_content = f.read() - self.assertIn( - "recovery_target_lsn = '{0}'".format(target_lsn), - recovery_conf_content) + self.assertIn( + "recovery_target_lsn = '{0}'".format(target_lsn), + recovery_conf_content) - self.assertIn( - "recovery_target_action = 'promote'", - recovery_conf_content) + self.assertIn( + "recovery_target_action = 'promote'", + recovery_conf_content) - self.assertIn( - "recovery_target_timeline = '1'", - recovery_conf_content) + self.assertIn( + "recovery_target_timeline = '1'", + recovery_conf_content) - node.slow_start() + node.slow_start() # Clean after yourself self.del_test_dir(module_name, fname) @@ -3197,117 +3185,42 @@ def test_missing_database_map(self): "postgres", "CREATE DATABASE backupdb") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE 
pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA 
pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog 
FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public 
FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: # TODO why backup works without these grants ? 
diff --git a/tests/retention.py b/tests/retention.py index b0399a239..7bfff6b28 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -1575,11 +1575,6 @@ def test_window_error_backups_2(self): self.show_pb(backup_dir, 'node')[1]['id'] - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'SELECT pg_catalog.pg_stop_backup()') - # Take DELTA backup self.backup_node( backup_dir, 'node', node, backup_type='delta', @@ -1599,10 +1594,6 @@ def test_retention_redundancy_overlapping_chains(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return unittest.skip('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1649,10 +1640,6 @@ def test_retention_redundancy_overlapping_chains_1(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return unittest.skip('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) diff --git a/tests/validate.py b/tests/validate.py index 22a03c3be..7cdc0e92e 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -1757,14 +1757,9 @@ def test_validate_corrupt_wal_between_backups(self): con.commit() target_xid = res[0][0] - if self.get_version(node) < self.version_to_num('10.0'): - walfile = node.safe_psql( - 'postgres', - 'select pg_xlogfile_name(pg_current_xlog_location())').decode('utf-8').rstrip() - else: - walfile = node.safe_psql( - 'postgres', - 'select pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() + walfile = node.safe_psql( + 'postgres', + 'select 
pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() if self.archive_compress: walfile = walfile + '.gz' @@ -3506,12 +3501,8 @@ def test_corrupt_pg_control_via_resetxlog(self): backup_id = self.backup_node(backup_dir, 'node', node) - if self.get_version(node) < 100000: - pg_resetxlog_path = self.get_bin_path('pg_resetxlog') - wal_dir = 'pg_xlog' - else: - pg_resetxlog_path = self.get_bin_path('pg_resetwal') - wal_dir = 'pg_wal' + pg_resetxlog_path = self.get_bin_path('pg_resetwal') + wal_dir = 'pg_wal' os.mkdir( os.path.join( From 5ed469d500969be554bdc906ecfd3cb368d8372d Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Wed, 20 Jul 2022 03:08:01 +0300 Subject: [PATCH 034/339] [PBCKP-232] remove depricated options (master-db, master-host, master-port, master-user, replica-timeout) part 1 --- src/configure.c | 54 ------------------------ src/help.c | 16 +------ src/pg_probackup.h | 3 -- tests/archive.py | 9 ---- tests/ptrack.py | 102 +++++++-------------------------------------- tests/replica.py | 24 ++--------- 6 files changed, 21 insertions(+), 187 deletions(-) diff --git a/src/configure.c b/src/configure.c index 3871aa8b9..47433346f 100644 --- a/src/configure.c +++ b/src/configure.c @@ -90,32 +90,6 @@ ConfigOption instance_options[] = &instance_config.conn_opt.pguser, SOURCE_CMD, 0, OPTION_CONN_GROUP, 0, option_get_value }, - /* Replica options */ - { - 's', 202, "master-db", - &instance_config.master_conn_opt.pgdatabase, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 203, "master-host", - &instance_config.master_conn_opt.pghost, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 204, "master-port", - &instance_config.master_conn_opt.pgport, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 205, "master-user", - &instance_config.master_conn_opt.pguser, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 'u', 206, "replica-timeout", - 
&instance_config.replica_timeout, SOURCE_CMD, SOURCE_DEFAULT, - OPTION_REPLICA_GROUP, OPTION_UNIT_S, option_get_value - }, /* Archive options */ { 'u', 207, "archive-timeout", @@ -362,8 +336,6 @@ init_config(InstanceConfig *config, const char *instance_name) config->xlog_seg_size = XLOG_SEG_SIZE; #endif - config->replica_timeout = REPLICA_TIMEOUT_DEFAULT; - config->archive_timeout = ARCHIVE_TIMEOUT_DEFAULT; /* Copy logger defaults */ @@ -437,32 +409,6 @@ readInstanceConfigFile(InstanceState *instanceState) &instance->conn_opt.pguser, SOURCE_CMD, 0, OPTION_CONN_GROUP, 0, option_get_value }, - /* Replica options */ - { - 's', 202, "master-db", - &instance->master_conn_opt.pgdatabase, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 203, "master-host", - &instance->master_conn_opt.pghost, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 204, "master-port", - &instance->master_conn_opt.pgport, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 205, "master-user", - &instance->master_conn_opt.pguser, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 'u', 206, "replica-timeout", - &instance->replica_timeout, SOURCE_CMD, SOURCE_DEFAULT, - OPTION_REPLICA_GROUP, OPTION_UNIT_S, option_get_value - }, /* Archive options */ { 'u', 207, "archive-timeout", diff --git a/src/help.c b/src/help.c index b22fa912e..14ed38bc8 100644 --- a/src/help.c +++ b/src/help.c @@ -2,7 +2,7 @@ * * help.c * - * Copyright (c) 2017-2021, Postgres Professional + * Copyright (c) 2017-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -416,13 +416,6 @@ help_backup(void) printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n")); - - printf(_("\n Replica 
options:\n")); - printf(_(" --master-user=user_name user name to connect to master (deprecated)\n")); - printf(_(" --master-db=db_name database to connect to master (deprecated)\n")); - printf(_(" --master-host=host_name database server host of master (deprecated)\n")); - printf(_(" --master-port=port database server port of master (deprecated)\n")); - printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n\n")); } static void @@ -878,13 +871,6 @@ help_set_config(void) printf(_(" --archive-host=destination address or hostname for ssh connection to archive host\n")); printf(_(" --archive-port=port port for ssh connection to archive host (default: 22)\n")); printf(_(" --archive-user=username user name for ssh connection to archive host (default: PostgreSQL user)\n")); - - printf(_("\n Replica options:\n")); - printf(_(" --master-user=user_name user name to connect to master (deprecated)\n")); - printf(_(" --master-db=db_name database to connect to master (deprecated)\n")); - printf(_(" --master-host=host_name database server host of master (deprecated)\n")); - printf(_(" --master-port=port database server port of master (deprecated)\n")); - printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n\n")); } static void diff --git a/src/pg_probackup.h b/src/pg_probackup.h index eb051065b..8e9d1568f 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -378,9 +378,6 @@ typedef struct InstanceConfig char *external_dir_str; ConnectionOptions conn_opt; - ConnectionOptions master_conn_opt; - - uint32 replica_timeout; //Deprecated. 
Not used anywhere /* Wait timeout for WAL segment archiving */ uint32 archive_timeout; diff --git a/tests/archive.py b/tests/archive.py index fe3d89b17..be5e33fbc 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -725,9 +725,6 @@ def test_replica_archive(self): backup_dir, 'replica', replica, options=[ '--archive-timeout=30', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), '--stream']) self.validate_pb(backup_dir, 'replica') @@ -764,9 +761,6 @@ def test_replica_archive(self): replica, backup_type='page', options=[ '--archive-timeout=60', - '--master-db=postgres', - '--master-host=localhost', - '--master-port={0}'.format(master.port), '--stream']) self.validate_pb(backup_dir, 'replica') @@ -857,9 +851,6 @@ def test_master_and_replica_parallel_archiving(self): backup_dir, 'replica', replica, options=[ '--archive-timeout=30', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), '--stream']) self.validate_pb(backup_dir, 'replica') diff --git a/tests/ptrack.py b/tests/ptrack.py index 19df9ff16..7dec55cc7 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -1560,13 +1560,7 @@ def test_create_db_on_replica(self): self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(node.port), - '--stream' - ] + options=['-j10', '--stream'] ) # CREATE DATABASE DB1 @@ -1584,13 +1578,7 @@ def test_create_db_on_replica(self): backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(node.port) - ] + options=['-j10', '--stream'] ) if self.paranoia: @@ -2304,11 +2292,7 @@ def test_ptrack_clean_replica(self): backup_dir, 'replica', replica, - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - 
'--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) master.safe_psql('postgres', 'checkpoint') for i in idx_ptrack: @@ -2335,11 +2319,7 @@ def test_ptrack_clean_replica(self): 'replica', replica, backup_type='ptrack', - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) master.safe_psql('postgres', 'checkpoint') for i in idx_ptrack: @@ -2367,11 +2347,7 @@ def test_ptrack_clean_replica(self): 'replica', replica, backup_type='page', - options=[ - '-j10', '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + options=['-j10', '--stream']) master.safe_psql('postgres', 'checkpoint') for i in idx_ptrack: @@ -2437,8 +2413,7 @@ def test_ptrack_cluster_on_btree(self): idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) + self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream']) node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') node.safe_psql('postgres', 'cluster t_heap using t_btree') @@ -2573,11 +2548,7 @@ def test_ptrack_cluster_on_btree_replica(self): master.safe_psql('postgres', 'vacuum t_heap') master.safe_psql('postgres', 'checkpoint') - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--stream', '--master-host=localhost', - '--master-db=postgres', '--master-port={0}'.format( - master.port)]) + self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream']) for i in idx_ptrack: # get size of heap and indexes. 
size calculated in pages @@ -2674,9 +2645,7 @@ def test_ptrack_cluster_on_gist_replica(self): self.backup_node( backup_dir, 'replica', replica, options=[ - '-j10', '--stream', '--master-host=localhost', - '--master-db=postgres', '--master-port={0}'.format( - master.port)]) + '-j10', '--stream']) for i in idx_ptrack: # get size of heap and indexes. size calculated in pages @@ -2844,11 +2813,7 @@ def test_ptrack_empty_replica(self): backup_dir, 'replica', replica, - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) # Create indexes for i in idx_ptrack: @@ -2868,11 +2833,7 @@ def test_ptrack_empty_replica(self): 'replica', replica, backup_type='ptrack', - options=[ - '-j1', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j1', '--stream']) if self.paranoia: pgdata = self.pgdata_content(replica.data_dir) @@ -3041,12 +3002,7 @@ def test_basic_ptrack_truncate_replica(self): # Make backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3070,12 +3026,7 @@ def test_basic_ptrack_truncate_replica(self): self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) pgdata = self.pgdata_content(replica.data_dir) @@ -3245,12 +3196,7 @@ def test_ptrack_vacuum_replica(self): replica.safe_psql('postgres', 'checkpoint') # Make FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--master-host=localhost', - 
'--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3430,12 +3376,7 @@ def test_ptrack_vacuum_bits_frozen_replica(self): # Take backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3688,12 +3629,7 @@ def test_ptrack_vacuum_full_replica(self): # Take FULL backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3860,13 +3796,7 @@ def test_ptrack_vacuum_truncate_replica(self): # Take FULL backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port) - ] + options=['-j10', '--stream'] ) if master.major_version < 11: diff --git a/tests/replica.py b/tests/replica.py index 4bcfa6083..4fe009062 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -152,11 +152,7 @@ def test_replica_stream_ptrack_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, - options=[ - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--stream']) self.validate_pb(backup_dir, 'replica') self.assertEqual( 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) @@ -188,11 +184,7 @@ def test_replica_stream_ptrack_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, 
backup_type='ptrack', - options=[ - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--stream']) self.validate_pb(backup_dir, 'replica') self.assertEqual( 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) @@ -279,11 +271,7 @@ def test_replica_archive_page_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, - options=[ - '--archive-timeout=60', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--archive-timeout=60']) self.validate_pb(backup_dir, 'replica') self.assertEqual( @@ -315,11 +303,7 @@ def test_replica_archive_page_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='page', - options=[ - '--archive-timeout=60', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--archive-timeout=60']) pgbench.wait() From a880b9165b4e0f89b0ae798f2b15aafadcb02a0b Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Wed, 20 Jul 2022 03:13:35 +0300 Subject: [PATCH 035/339] [PBCKP-232] makefile simplification --- Makefile | 26 ++++++-------------------- get_pg_version.mk | 36 ------------------------------------ 2 files changed, 6 insertions(+), 56 deletions(-) delete mode 100644 get_pg_version.mk diff --git a/Makefile b/Makefile index 3753d9cb7..a1b1ebed3 100644 --- a/Makefile +++ b/Makefile @@ -17,21 +17,17 @@ # git clone https://github.com/postgrespro/pg_probackup postgresql/contrib/pg_probackup # cd postgresql # ./configure ... && make -# make --no-print-directory -C contrib/pg_probackup +# make -C contrib/pg_probackup # # 4. out of PG source and without PGXS # git clone https://git.postgresql.org/git/postgresql.git postgresql-src # git clone https://github.com/postgrespro/pg_probackup postgresql-src/contrib/pg_probackup # mkdir postgresql-build && cd postgresql-build # ../postgresql-src/configure ... 
&& make -# make --no-print-directory -C contrib/pg_probackup +# make -C contrib/pg_probackup # top_pbk_srcdir := $(dir $(realpath $(firstword $(MAKEFILE_LIST)))) -# get postgres version -PG_MAJORVER != $(MAKE) USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) --silent --makefile=$(top_pbk_srcdir)get_pg_version.mk -#$(info Making with PG_MAJORVER=$(PG_MAJORVER)) - PROGRAM := pg_probackup # pg_probackup sources @@ -47,18 +43,14 @@ OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o s BORROWED_H_SRC := \ src/include/portability/instr_time.h \ src/bin/pg_basebackup/receivelog.h \ - src/bin/pg_basebackup/streamutil.h + src/bin/pg_basebackup/streamutil.h \ + src/bin/pg_basebackup/walmethods.h BORROWED_C_SRC := \ src/backend/access/transam/xlogreader.c \ src/backend/utils/hash/pg_crc.c \ src/bin/pg_basebackup/receivelog.c \ - src/bin/pg_basebackup/streamutil.c -ifneq ($(PG_MAJORVER), $(findstring $(PG_MAJORVER), 9.5 9.6)) -BORROWED_H_SRC += \ - src/bin/pg_basebackup/walmethods.h -BORROWED_C_SRC += \ + src/bin/pg_basebackup/streamutil.c \ src/bin/pg_basebackup/walmethods.c -endif BORROW_DIR := src/borrowed BORROWED_H := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_H_SRC))) @@ -84,9 +76,6 @@ include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif -# now we can use standard MAJORVERSION variable instead of calculated PG_MAJORVER -undefine PG_MAJORVER - # PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -I$(top_pbk_srcdir)src -I$(BORROW_DIR) PG_CPPFLAGS += -I$(top_pbk_srcdir)src/fu_util -Wno-declaration-after-statement @@ -99,11 +88,8 @@ PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} # additional dependencies on borrowed files src/archive.o: $(BORROW_DIR)/instr_time.h src/backup.o src/catchup.o src/pg_probackup.o: $(BORROW_DIR)/streamutil.h -src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o: $(BORROW_DIR)/receivelog.h -ifneq ($(MAJORVERSION), $(findstring $(MAJORVERSION), 9.5 9.6)) 
+src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o $(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h $(BORROW_DIR)/receivelog.h: $(BORROW_DIR)/walmethods.h -$(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h -endif # generate separate makefile to handle borrowed files borrowed.mk: $(firstword $(MAKEFILE_LIST)) diff --git a/get_pg_version.mk b/get_pg_version.mk deleted file mode 100644 index d5468c5bb..000000000 --- a/get_pg_version.mk +++ /dev/null @@ -1,36 +0,0 @@ -# pg_probackup build system -# -# When building pg_probackup, there is a chicken and egg problem: -# 1. We have to define the OBJS list before including the PG makefiles. -# 2. To define this list, we need to know the PG major version. -# 3. But we can find out the postgres version only after including makefiles. -# -# This minimal makefile solves this problem, its only purpose is to -# calculate the version number from which the main build will occur next. -# -# Usage: -# include this line into main makefile -# PG_MAJORVER != $(MAKE) USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) --silent --makefile=get_pg_version.mk -# -# Known issues: -# When parent make called with -C and without --no-print-directory, then -# 'make: Leaving directory ...' string will be added (by caller make process) to PG_MAJORVER -# (at least with GNU Make 4.2.1) -# -.PHONY: get_pg_version -get_pg_version: - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/pg_probackup -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif - -get_pg_version: - $(info $(MAJORVERSION)) - From c44550dd36682a1ad588db9d0629057bf059b674 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 26 Jul 2022 23:29:10 +0300 Subject: [PATCH 036/339] fu_util: some simplification and optimization. 
--- Makefile | 3 +- src/fu_util/fm_util.h | 255 ++++++----- src/fu_util/fo_obj.h | 45 +- src/fu_util/ft_array.inc.h | 2 +- src/fu_util/ft_search.inc.h | 4 +- src/fu_util/ft_sort.inc.h | 4 +- src/fu_util/ft_util.h | 12 +- src/fu_util/impl/fo_impl.c | 130 ++++-- src/fu_util/impl/fo_impl.h | 594 +++++++++---------------- src/fu_util/impl/fo_impl2.h | 61 ++- src/fu_util/impl/ft_impl.h | 13 +- src/fu_util/test/obj1.c | 36 +- src/fu_util/test/qsort/sort_template.h | 3 +- src/pg_probackup.h | 4 - src/utils/file.c | 8 +- src/utils/file.h | 4 + 16 files changed, 590 insertions(+), 588 deletions(-) diff --git a/Makefile b/Makefile index 3753d9cb7..6e2aa8ded 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,6 @@ PROGRAM := pg_probackup # pg_probackup sources OBJS := src/utils/configuration.o src/utils/json.o src/utils/logger.o \ src/utils/parray.o src/utils/pgut.o src/utils/thread.o src/utils/remote.o src/utils/file.o -OBJS += src/fu_util/impl/ft_impl.o src/fu_util/impl/fo_impl.o OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o src/data.o \ src/delete.o src/dir.o src/fetch.o src/help.o src/init.o src/merge.o \ src/parsexlog.o src/ptrack.o src/pg_probackup.o src/restore.o src/show.o src/stream.o \ @@ -60,6 +59,8 @@ BORROWED_C_SRC += \ src/bin/pg_basebackup/walmethods.c endif +OBJS += src/fu_util/impl/ft_impl.o src/fu_util/impl/fo_impl.o + BORROW_DIR := src/borrowed BORROWED_H := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_H_SRC))) BORROWED_C := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_C_SRC))) diff --git a/src/fu_util/fm_util.h b/src/fu_util/fm_util.h index 54eb0b2b8..11d96682d 100644 --- a/src/fu_util/fm_util.h +++ b/src/fu_util/fm_util.h @@ -2,19 +2,28 @@ #ifndef FM_UTIL_H #define FM_UTIL_H -#define fm_cat_impl(x, y) x##y -#define fm_cat(x, y) fm_cat_impl(x, y) -#define fm_cat3_impl(x, y, z) x##y##z -#define fm_cat3(x, y, z) fm_cat3_impl(x, y, z) -#define fm_cat4_impl(w, x, y, z) w##x##y##z -#define fm_cat4(w, x, y, z) fm_cat4_impl(w, 
x, y, z) -#define fm_str_impl(...) #__VA_ARGS__ -#define fm_str(...) fm_str_impl(__VA_ARGS__) +#define fm_cat(x, y) fm__cat(x, y) +#define fm__cat(x, y) x##y +#define fm_cat3(x, y, z) fm__cat3(x, y, z) +#define fm__cat3(x, y, z) x##y##z +#define fm_cat4(w, x, y, z) fm__cat4(w, x, y, z) +#define fm__cat4(w, x, y, z) w##x##y##z +#define fm_str(...) fm__str(__VA_ARGS__) +#define fm__str(...) #__VA_ARGS__ #define fm_uniq(x) fm_cat(_##x##_, __COUNTER__) #define fm_expand(...) __VA_ARGS__ #define fm_empty(...) +#define fm_comma(...) , +#define fm__comma , + +#define fm_apply(macro, ...) \ + macro(__VA_ARGS__) + +/****************************************/ +// LOGIC + #define fm_compl(v) fm_cat(fm_compl_, v) #define fm_compl_0 1 #define fm_compl_1 0 @@ -54,59 +63,69 @@ #define fm__iif_1(...) __VA_ARGS__ fm_empty #define fm__iif_0(...) fm_expand -#define fm_va_comma(...) \ - fm_cat(fm__va_comma_, fm_va_01(__VA_ARGS__))() -#define fm__va_comma_0() -#define fm__va_comma_1() , - -#define fm_or_default(...) \ - fm_iif(fm_va_01(__VA_ARGS__))(__VA_ARGS__) +/****************************************/ +// COMPARISON -#define fm__primitive_compare(x, y) fm_is_tuple(COMPARE_##x(COMPARE_##y)(())) -#define fm__is_comparable(x) fm_is_tuple(fm_cat(COMPARE_,x)(())) -#define fm_not_equal(x, y) \ - fm_if(fm_and(fm__is_comparable(x),fm__is_comparable(y)), fm__primitive_compare, 1 fm_empty)(x, y) #define fm_equal(x, y) \ fm_compl(fm_not_equal(x, y)) +#define fm_not_equal(x, y) \ + fm_if(fm_and(fm__is_comparable(x),fm__is_comparable(y)), fm__primitive_compare, 1 fm_empty)(x, y) +#define fm__primitive_compare(x, y) fm_is_tuple(COMPARE_##x(COMPARE_##y)(())) +#define fm__is_comparable(x) fm_is_tuple(fm_cat(COMPARE_,x)(())) -#define fm_comma(...) , -#define fm__comma , +/****************************************/ +// __VA_ARGS__ + +#define fm_head(...) fm__head(__VA_ARGS__) +#define fm__head(x, ...) x +#define fm_tail(...) fm__tail(__VA_ARGS__) +#define fm__tail(x, ...) 
__VA_ARGS__ + +#define fm_or_default(...) \ + fm_iif(fm_va_01(__VA_ARGS__))(__VA_ARGS__) #define fm_va_single(...) fm__va_single(__VA_ARGS__, fm__comma) #define fm_va_many(...) fm__va_many(__VA_ARGS__, fm__comma) #define fm__va_single(x, y, ...) fm__va_result(y, 1, 0) #define fm__va_many(x, y, ...) fm__va_result(y, 0, 1) #define fm__va_result(x, y, res, ...) res -#if !__STRICT_ANSI__ -#define fm_no_va(...) fm__no_va(__VA_ARGS__) -#define fm__no_va(...) fm_va_single(~, ##__VA_ARGS__) -#define fm_va_01(...) fm__va_01(__VA_ARGS__) -#define fm__va_01(...) fm_va_many(~, ##__VA_ARGS__) -#else #define fm_no_va fm_is_empty #define fm_va_01 fm_isnt_empty +#define fm_va_01n(...) fm_cat3(fm__va_01n_, fm__isnt_empty(__VA_ARGS__), fm_va_many(__VA_ARGS__)) +#define fm__va_01n_00 0 +#define fm__va_01n_10 1 +#define fm__va_01n_11 n + +#if !__STRICT_ANSI__ +#define fm_is_empty(...) fm__is_empty(__VA_ARGS__) +#define fm__is_empty(...) fm_va_single(~, ##__VA_ARGS__) +#define fm_isnt_empty(...) fm__isnt_empty(__VA_ARGS__) +#define fm__isnt_empty(...) fm_va_many(~, ##__VA_ARGS__) +#else +#define fm_is_empty(...) fm_and(fm__is_emptyfirst(__VA_ARGS__), fm_va_single(__VA_ARGS__)) +#define fm_isnt_empty(...) fm_nand(fm__is_emptyfirst(__VA_ARGS__), fm_va_single(__VA_ARGS__)) + +#define fm__is_emptyfirst(x, ...) fm_iif(fm_is_tuple(x))(0)(fm__is_emptyfirst_impl(x)) +#define fm__is_emptyfirst_impl(x,...) fm_tuple_2((\ + fm__is_emptyfirst_do1 x (fm__is_emptyfirst_do2), 1, 0)) +#define fm__is_emptyfirst_do1(F) F() +#define fm__is_emptyfirst_do2(...) , #endif +#define fm_when_isnt_empty(...) fm_cat(fm__when_, fm__isnt_empty(__VA_ARGS__)) +#define fm_va_comma(...) \ + fm_when_isnt_empty(__VA_ARGS__)(fm__comma) +#define fm_va_comma_fun(...) \ + fm_if(fm_va_01(__VA_ARGS__), fm_comma, fm_empty) + + +/****************************************/ +// Tuples + +#define fm_is_tuple(x, ...) fm__is_tuple_(fm__is_tuple_help x, 1, 0) #define fm__is_tuple_choose(a,b,x,...) x #define fm__is_tuple_help(...) 
, #define fm__is_tuple_(...) fm__is_tuple_choose(__VA_ARGS__) -#define fm_is_tuple(x, ...) fm__is_tuple_(fm__is_tuple_help x, 1, 0) - -#define fm_head(x, ...) x -#define fm_tail(x, ...) __VA_ARGS__ - -#define fm_apply_1(macro, x, ...) \ - macro(x) -#define fm_apply_2(macro, x, y, ...) \ - macro(x, y) -#define fm_apply_3(macro, x, y, z, ...) \ - macro(x, y, z) -#define fm_apply_tuple_1(macro, x, ...) \ - macro x -#define fm_apply_tuple_2(macro, x, y, ...) \ - fm__apply_tuple_2(macro, x, fm_expand y) -#define fm__apply_tuple_2(macro, x, ...) \ - macro(x, __VA_ARGS__) #define fm_tuple_expand(x) fm_expand x #define fm_tuple_tag(x) fm_head x @@ -117,37 +136,18 @@ #define fm_tuple_2(x) fm__tuple_2 x #define fm__tuple_2(_0, _1, _2, ...) _2 +#define fm_tuple_tag_or_0(x) fm__tuple_tag_or_0_(fm__tuple_tag_or_0_help x, 0) +#define fm__tuple_tag_or_0_(...) fm__tuple_tag_or_0_choose(__VA_ARGS__) #define fm__tuple_tag_or_0_choose(a,x,...) x #define fm__tuple_tag_or_0_help(tag, ...) , tag -#define fm__tuple_tag_or_0_(...) fm__tuple_tag_or_0_choose(__VA_ARGS__) -#define fm_tuple_tag_or_0(x) fm__tuple_tag_or_0_(fm__tuple_tag_or_0_help x, 0) #define fm_dispatch_tag_or_0(prefix, x) \ fm_cat(prefix, fm_tuple_tag_or_0(x)) -#define fm_va_012(...) \ - fm_if(fm_no_va(__VA_ARGS__), 0, fm__va_12(__VA_ARGS__)) -#define fm__va_12(...) \ - fm_if(fm_va_single(__VA_ARGS__), 1, 2) - -// recursion handle -#define fm_defer(id) id fm_empty() -#define fm_recurs(id) id fm_empty fm_empty() () -#define fm_recurs2(a,b) fm_cat fm_empty fm_empty() () (a,b) - -#if __STRICT_ANSI__ -#define fm__is_emptyfirst(x, ...) fm_if(fm_is_tuple(x), 0, fm__is_emptyfirst_impl(x)) -#define fm__is_emptyfirst_impl(x,...) fm_tuple_2((\ - fm__is_emptyfirst_do1 x (fm__is_emptyfirst_do2), 1, 0)) -#define fm__is_emptyfirst_do1(F) F() -#define fm__is_emptyfirst_do2(...) , -#define fm_is_empty(...) fm_and(fm__is_emptyfirst(__VA_ARGS__), fm_va_single(__VA_ARGS__)) -#define fm_isnt_empty(...) 
fm_nand(fm__is_emptyfirst(__VA_ARGS__), fm_va_single(__VA_ARGS__)) -#else -#define fm_is_empty fm_no_va -#define fm_isnt_empty fm_va_01 -#endif +/****************************************/ +// Iteration +/* recursion engine */ #define fm_eval(...) fm__eval_0(__VA_ARGS__) #ifdef FU_LONG_EVAL #define fm__eval_0(...) fm__eval_1(fm__eval_1(fm__eval_1(fm__eval_1(__VA_ARGS__)))) @@ -158,75 +158,96 @@ #define fm__eval_2(...) fm__eval_3(fm__eval_3(__VA_ARGS__)) #define fm__eval_3(...) __VA_ARGS__ -#define fm_foreach(macro, ...) \ - fm_when(fm_va_01(__VA_ARGS__))( \ - fm_apply_1(macro, __VA_ARGS__) \ - fm_recurs2(fm_, foreach) (\ - macro, fm_tail(__VA_ARGS__) \ - ) \ - ) +// recursion handle : delay macro expansion to next recursion iteration +#define fm_recurs(id) id fm_empty fm_empty() () +#define fm_recurs2(a,b) fm_cat fm_empty fm_empty() () (a,b) +#define fm_defer(id) id fm_empty() -#define fm_foreach_arg(macro, arg, ...) \ - fm_when(fm_va_01(__VA_ARGS__))( \ - fm_apply_2(macro, arg, __VA_ARGS__) \ - fm_recurs2(fm_, foreach_arg) (\ - macro, arg, fm_tail(__VA_ARGS__) \ - ) \ - ) +#define fm_foreach_join(join, macro, ...) \ + fm_foreach_join_(fm_empty, join, macro, __VA_ARGS__) +#define fm_foreach_join_(join1, join2, macro, ...) \ + fm_cat(fm_foreach_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, __VA_ARGS__) +#define fm_foreach_join_0(join1, join2, macro, ...) +#define fm_foreach_join_1(join1, join2, macro, x) \ + join1() macro(x) +#define fm_foreach_join_n(join1, join2, macro, x, y, ...) \ + join1() macro(x) \ + join2() macro(y) \ + fm_recurs2(fm_, foreach_join_) (join2, join2, macro, __VA_ARGS__) +#define fm_foreach(macro, ...) \ + fm_foreach_join(fm_empty, macro, __VA_ARGS__) #define fm_foreach_comma(macro, ...) 
\ - fm_when(fm_va_01(__VA_ARGS__))( \ - fm_apply_1(macro, __VA_ARGS__\ - )fm_if(fm_va_single(__VA_ARGS__), , fm__comma)\ - fm_recurs2(fm_, foreach_comma) (\ - macro, fm_tail(__VA_ARGS__) \ - ) \ - ) + fm_foreach_join(fm_comma, macro, __VA_ARGS__) + +#define fm_foreach_arg_join(join, macro, arg, ...) \ + fm_foreach_arg_join_(fm_empty, join, macro, arg, __VA_ARGS__) +#define fm_foreach_arg_join_(join1, join2, macro, arg, ...) \ + fm_cat(fm_foreach_arg_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, arg, __VA_ARGS__) +#define fm_foreach_arg_join_0(join1, join2, macro, ...) +#define fm_foreach_arg_join_1(join1, join2, macro, arg, x) \ + join1() macro(arg, x) +#define fm_foreach_arg_join_n(join1, join2, macro, arg, x, y, ...) \ + join1() macro(arg, x) \ + join2() macro(arg, y) \ + fm_recurs2(fm_, foreach_arg_join_) (join2, join2, macro, arg, __VA_ARGS__) +#define fm_foreach_arg(macro, arg, ...) \ + fm_foreach_arg_join(fm_empty, macro, arg, __VA_ARGS__) +#define fm_foreach_arg_comma(macro, arg, ...) \ + fm_foreach_arg_join(fm_comma, macro, arg, __VA_ARGS__) + +#define fm_foreach_tuple_join(join, macro, ...) \ + fm_foreach_tuple_join_(fm_empty, join, macro, __VA_ARGS__) +#define fm_foreach_tuple_join_(join1, join2, macro, ...) \ + fm_cat(fm_foreach_tuple_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, __VA_ARGS__) +#define fm_foreach_tuple_join_0(join1, join2, macro, ...) +#define fm_foreach_tuple_join_1(join1, join2, macro, x) \ + join1() macro x +#define fm_foreach_tuple_join_n(join1, join2, macro, x, y, ...) \ + join1() macro x \ + join2() macro y \ + fm_recurs2(fm_, foreach_tuple_join_) (join2, join2, macro, __VA_ARGS__) #define fm_foreach_tuple(macro, ...) \ - fm_when(fm_va_01(__VA_ARGS__))( \ - fm_apply_tuple_1(macro, __VA_ARGS__) \ - fm_recurs2(fm_, foreach_tuple) (\ - macro, fm_tail(__VA_ARGS__) \ - ) \ - ) - -#define fm_foreach_tuple_arg(macro, arg, ...) 
\ - fm_when(fm_va_01(__VA_ARGS__))( \ - fm_apply_tuple_2(macro, arg, __VA_ARGS__) \ - fm_recurs2(fm_, foreach_tuple_arg) (\ - macro, arg, fm_tail(__VA_ARGS__) \ - ) \ - ) - + fm_foreach_tuple_join(fm_empty, macro, __VA_ARGS__) #define fm_foreach_tuple_comma(macro, ...) \ - fm_when(fm_va_01(__VA_ARGS__))( \ - fm_apply_tuple_1(macro, __VA_ARGS__\ - )fm_if(fm_va_single(__VA_ARGS__), fm_empty(), fm__comma)\ - fm_recurs2(fm_, foreach_tuple_comma) (\ - macro, fm_tail(__VA_ARGS__) \ - ) \ - ) + fm_foreach_tuple_join(fm_comma, macro, __VA_ARGS__) + +#define fm_foreach_tuple_arg_join(join, macro, arg, ...) \ + fm_foreach_tuple_arg_join_(fm_empty, join, macro, arg, __VA_ARGS__) +#define fm_foreach_tuple_arg_join_(join1, join2, macro, arg, ...) \ + fm_cat(fm_foreach_tuple_arg_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, arg, __VA_ARGS__) +#define fm_foreach_tuple_arg_join_0(join1, join2, macro, ...) +#define fm_foreach_tuple_arg_join_1(join1, join2, macro, arg, x) \ + join1() fm_apply(macro, arg, fm_expand x) +#define fm_foreach_tuple_arg_join_n(join1, join2, macro, arg, x, y, ...) \ + join1() fm_apply(macro, arg, fm_expand x) \ + join2() fm_apply(macro, arg, fm_expand y) \ + fm_recurs2(fm_, foreach_tuple_arg_join_) (join2, join2, macro, arg, __VA_ARGS__) +#define fm_foreach_tuple_arg(macro, arg, ...) \ + fm_foreach_tuple_arg_join(fm_empty, macro, arg, __VA_ARGS__) +#define fm_foreach_tuple_arg_comma(macro, arg, ...) \ + fm_foreach_tuple_arg_join(fm_comma, macro, arg, __VA_ARGS__) #define fm_eval_foreach(macro, ...) \ - fm_eval(fm_foreach(macro, __VA_ARGS__)) + fm_eval(fm_foreach(macro, __VA_ARGS__)) #define fm_eval_foreach_comma(macro, ...) \ - fm_eval(fm_foreach_comma(macro, __VA_ARGS__)) + fm_eval(fm_foreach_comma(macro, __VA_ARGS__)) #define fm_eval_foreach_arg(macro, arg, ...) \ - fm_eval(fm_foreach_arg(macro, arg, __VA_ARGS__)) + fm_eval(fm_foreach_arg(macro, arg, __VA_ARGS__)) #define fm_eval_tuples(macro, ...) 
\ - fm_eval(fm_foreach_tuple(macro, __VA_ARGS__)) + fm_eval(fm_foreach_tuple(macro, __VA_ARGS__)) #define fm_eval_tuples_arg(macro, arg, ...) \ - fm_eval(fm_foreach_tuple_arg(macro, arg, __VA_ARGS__)) + fm_eval(fm_foreach_tuple_arg(macro, arg, __VA_ARGS__)) #define fm_eval_tuples_comma(macro, ...) \ - fm_eval(fm_foreach_tuple_comma(macro, __VA_ARGS__)) + fm_eval(fm_foreach_tuple_comma(macro, __VA_ARGS__)) #define fm__dumb_require_semicolon \ struct __dumb_struct_declaration_for_semicolon diff --git a/src/fu_util/fo_obj.h b/src/fu_util/fo_obj.h index 6e196b91c..70d4ee6b9 100644 --- a/src/fu_util/fo_obj.h +++ b/src/fu_util/fo_obj.h @@ -39,6 +39,9 @@ extern void fobj_freeze(void); /* Generate all method boilerplate. */ #define fobj_method(method) fobj__define_method(method) +/* Generates runtime privately called method boilerplate */ +#define fobj_special_method(meth) fobj__special_method(meth) + /* * Ensure method initialized. * Calling fobj_method_init is not required, @@ -197,7 +200,7 @@ extern fobj_t fobj_swap(fobj_t* var, fobj_t newval); * It is called automatically before destroying object. */ #define mth__fobjDispose void -fobj__special_void_method(fobjDispose); +fobj_special_method(fobjDispose); /* * returns globally allocated klass name. 
@@ -343,16 +346,21 @@ extern fobj_klass_handle_t fobj_real_klass_of(fobj_t); #define $notNULL(iface) ((iface).self != NULL) #define $setNULL(ifacep) ((ifacep)->self = NULL) #define $null(iface_type) ((iface_type##_i){NULL}) + /* * Base type */ -#define mth__fobjRepr struct fobjStr* -fobj_method(fobjRepr); +#define iface__fobj mth(fobjKlass, fobjRepr) +/* hardcoded instantiation because of fobj_iface always include iface__fobj */ +fobj__iface_declare_i(fobj, (mth, fobjKlass, fobjRepr)); + +#define mth__fobjRepr union fobjStr* +fobj__define_base_method(fobjRepr); #define mth__fobjKlass fobj_klass_handle_t -fobj_method(fobjKlass); +fobj__define_base_method(fobjKlass); -#define $repr(obj) $(fobjRepr, (obj)) -#define $irepr(iface) $(fobjRepr, (iface).self) +#define $repr(obj) fobj_getstr(fobjRepr(obj)).ptr +#define $irepr(iface) fobj_getstr(fobjRepr((iface).self)).ptr typedef struct fobjBase { char fobj__base[0]; @@ -371,15 +379,17 @@ fobj_method(fobjFormat); * String */ -typedef struct fobjStr { - const char *ptr; - uint32_t len; - char _buf[]; /* private buffer for copied string */ -} fobjStr; +typedef union fobjStr fobjStr; ft_inline fobjStr* fobj_str(const char* s); -#define $S(s) fobj_str(s) -extern fobjStr* fobj_newstr(ft_str_t str, bool gifted); +ft_inline fobjStr* fobj_str_const(const char* s); +#define $S(s) (__builtin_constant_p(s) ? 
fobj_str_const(s) : fobj_str(s)) +enum FOBJ_STR_ALLOC { + FOBJ_STR_GIFTED, + FOBJ_STR_CONST, + FOBJ_STR_COPY, +}; +extern fobjStr* fobj_newstr(ft_str_t str, enum FOBJ_STR_ALLOC ownership); ft_inline ft_str_t fobj_getstr(fobjStr *str); /* @@ -391,6 +401,7 @@ ft_inline fobjStr* fobj_strbuf_steal(ft_strbuf_t *buf); ft_gnu_printf(1, 2) extern fobjStr* fobj_sprintf(const char* fmt, ...); extern fobjStr* fobj_strcat(fobjStr *ostr, ft_str_t str); +extern fobjStr* fobj_strcat2(fobjStr *ostr, ft_str_t str1, ft_str_t str2); ft_inline fobjStr* fobj_strcatc(fobjStr *ostr, const char *str); ft_inline fobjStr* fobj_strcatc2(fobjStr *ostr, const char *str1, const char *str2); ft_inline fobjStr* fobj_stradd(fobjStr *ostr, fobjStr *other); @@ -407,8 +418,8 @@ ft_inline FT_CMP_RES fobj_strcmp_c(fobjStr* self, const char *oth); /* turn object to string using fobjFormat */ extern fobjStr* fobj_tostr(fobj_t obj, const char* fmt); -#define $tostr(obj, ...) fobj_tostr((obj), fm_or_default(__VA_ARGS__)(NULL)) -#define $itostr(obj, ...) fobj_tostr((obj).self, fm_or_default(__VA_ARGS__)(NULL)) +#define $tostr(obj, ...) fobj_getstr(fobj_tostr((obj), fm_or_default(__VA_ARGS__)(NULL))).ptr +#define $itostr(obj, ...) fobj_getstr(fobj_tostr((obj).self, fm_or_default(__VA_ARGS__)(NULL))).ptr #define kls__fobjStr mth(fobjRepr, fobjFormat) fobj_klass(fobjStr); @@ -497,7 +508,7 @@ extern fobjStr* fobj_printkv(const char *fmt, ft_slc_fokv_t kv); */ #define mth___fobjErr_marker_DONT_IMPLEMENT_ME void -fobj__special_void_method(_fobjErr_marker_DONT_IMPLEMENT_ME); +fobj_special_method(_fobjErr_marker_DONT_IMPLEMENT_ME); #define iface__err mth(_fobjErr_marker_DONT_IMPLEMENT_ME) fobj_iface(err); @@ -548,7 +559,7 @@ fobj_error_cstr_key(__msgSuffix); * $syserr(errno, "allocation error") * $syserr(errno, "Could not open file {path}", (path, filename)) */ -#define $syserr(erno, ...) fobj_make_syserr((erno), __VA_ARGS__) +#define $syserr(erno, ...) 
fobj_make_syserr((erno), __VA_ARGS__) /* fetch key back */ #define $errkey(key, err, ...) fobj__err_getkey(key, err, __VA_ARGS__) diff --git a/src/fu_util/ft_array.inc.h b/src/fu_util/ft_array.inc.h index 115aa210e..57d7cad42 100644 --- a/src/fu_util/ft_array.inc.h +++ b/src/fu_util/ft_array.inc.h @@ -540,7 +540,7 @@ ft_array_walk_r(ft_array_type *arr, ft_inline void ft_array_walk(ft_array_type *arr, FT_WALK_ACT (*walk)(FT_SLICE_TYPE *el)) { - ft_array_walk_r(arr, (FT_WALK_ACT (*)(FT_SLICE_TYPE*, ft_arg_t))walk, ft_mka_z()); + ft_array_walk_r(arr, (FT_WALK_ACT (*)(FT_SLICE_TYPE*, ft_arg_t))(void*)walk, ft_mka_z()); } #undef FT_SLICE diff --git a/src/fu_util/ft_search.inc.h b/src/fu_util/ft_search.inc.h index 432554c78..b567e11bf 100644 --- a/src/fu_util/ft_search.inc.h +++ b/src/fu_util/ft_search.inc.h @@ -79,7 +79,7 @@ ft_func_bsearch_r(FT_SEARCH_TYPE *arr, size_t len, FT_SEARCH_PATTERN pat, ft_inline ft_bsres_t ft_func_bsearch(FT_SEARCH_TYPE *arr, size_t len, FT_SEARCH_PATTERN pat, _ft_cmp_def(cmp)) { - return ft_func_bsearch_r(arr, len, pat, (_ft_cmp_def_r()) cmp, ft_mka_z()); + return ft_func_bsearch_r(arr, len, pat, (_ft_cmp_def_r())(void*) cmp, ft_mka_z()); } ft_inline size_t @@ -96,7 +96,7 @@ ft_func_search_r(FT_SEARCH_TYPE *arr, size_t len, FT_SEARCH_PATTERN pat, ft_inline size_t ft_func_search(FT_SEARCH_TYPE *arr, size_t len, FT_SEARCH_PATTERN pat, _ft_cmp_def(cmp)) { - return ft_func_search_r(arr, len, pat, (_ft_cmp_def_r()) cmp, ft_mka_z()); + return ft_func_search_r(arr, len, pat, (_ft_cmp_def_r())(void*) cmp, ft_mka_z()); } #undef FT_SEARCH diff --git a/src/fu_util/ft_sort.inc.h b/src/fu_util/ft_sort.inc.h index a4eb22d2d..a55093908 100644 --- a/src/fu_util/ft_sort.inc.h +++ b/src/fu_util/ft_sort.inc.h @@ -71,7 +71,7 @@ ft_func_shsort_r(FT_SORT_TYPE *arr, size_t len, _ft_cmp_def_r(cmp), ft_arg_t arg ft_inline ft_optimize3 void ft_func_shsort(FT_SORT_TYPE *arr, size_t len, _ft_cmp_def(cmp)) { - ft_func_shsort_r(arr, len, (_ft_cmp_def_r()) cmp, 
ft_mka_z()); + ft_func_shsort_r(arr, len, (_ft_cmp_def_r())(void*) cmp, ft_mka_z()); } ft_inline ft_optimize3 void @@ -159,7 +159,7 @@ ft_func_qsort_r(FT_SORT_TYPE *arr_, size_t len_, _ft_cmp_def_r(cmp), ft_arg_t ar ft_inline ft_optimize3 void ft_func_qsort(FT_SORT_TYPE *arr, size_t len, _ft_cmp_def(cmp)) { - ft_func_qsort_r(arr, len, (_ft_cmp_def_r()) cmp, ft_mka_z()); + ft_func_qsort_r(arr, len, (_ft_cmp_def_r())(void*) cmp, ft_mka_z()); } diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index d9eaeb881..56a0d05d2 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -23,7 +23,7 @@ typedef SSIZE_T ssize_t; #ifdef __GNUC__ #define ft_gcc_const __attribute__((const)) #define ft_gcc_pure __attribute__((pure)) -#if __GNUC__ > 10 +#if __GNUC__ > 10 && !defined(__clang__) #define ft_gcc_malloc(free, idx) __attribute__((malloc, malloc(free, idx))) #else #define ft_gcc_malloc(free, idx) __attribute__((malloc)) @@ -32,6 +32,7 @@ typedef SSIZE_T ssize_t; #define ft_gnu_printf(fmt, arg) __attribute__((format(printf,fmt,arg))) #define ft_likely(x) __builtin_expect(!!(x), 1) #define ft_unlikely(x) __builtin_expect(!!(x), 0) +#define ft_always_inline __attribute__((always_inline)) #else #define ft_gcc_const #define ft_gcc_pure @@ -40,7 +41,9 @@ typedef SSIZE_T ssize_t; #define ft_gnu_printf(fmt, arg) #define ft_likely(x) (x) #define ft_unlikely(x) (x) +#define ft_always_inline #endif +#define ft_static static ft_unused #define ft_inline static ft_unused inline #if defined(__GNUC__) && !defined(__clang__) @@ -158,9 +161,8 @@ extern void* ft_realloc_arr(void* ptr, size_t elem_sz, size_t old_elems, size_t #define ft_free(ptr) ft_realloc((ptr), 0) #define ft_calloc_arr(sz, cnt) ft_calloc(ft_mul_size((sz), (cnt))) -extern void ft_set_allocators( - void *(*_realloc)(void *, size_t), - void (*_free)(void*)); +extern void ft_set_allocators(void *(*_realloc)(void *, size_t), + void (*_free)(void*)); /* overflow checking size addition and multiplication */ 
ft_inline size_t ft_add_size(size_t a, size_t b); @@ -214,6 +216,8 @@ ft_inline uint32_t ft_mix32(uint32_t data); extern uint32_t ft_rand(void); /* Dumb quality random 0<=r 0 && meth <= atload(&fobj_methods_n)); ft_assert(meth != fobj__nm_mhandle(fobjDispose)()); } @@ -200,11 +200,13 @@ fobj_method_search(const fobj_t self, fobj_method_handle_t meth, fobj_klass_hand h = ((fobj_header_t*)self - 1); assert(h->magic == FOBJ_HEADER_MAGIC); klass = h->klass; - ft_dbg_assert(klass > 0 && klass <= atload(&fobj_klasses_n)); - ft_assert((h->flags & FOBJ_DISPOSED) == 0, "Call '%s' on disposed object '%s'", - fobj_methods[meth].name, fobj_klasses[klass].name); + if (ft_unlikely(ft_dbg_enabled())) { + ft_assert(klass > 0 && klass <= atload(&fobj_klasses_n)); + ft_assert((h->flags & FOBJ_DISPOSED) == 0, "Call '%s' on disposed object '%s'", + fobj_methods[meth].name, fobj_klasses[klass].name); + } - if (ft_unlikely(for_child != 0)) { + if (for_child != 0) { if (ft_unlikely(ft_dbg_enabled())) { while (klass && klass != for_child) { klass = fobj_klasses[klass].parent; @@ -258,7 +260,7 @@ fobj_method_implements(const fobj_t self, fobj_method_handle_t meth) { extern void fobj__validate_args(fobj_method_handle_t meth, fobj_t self, - const char** paramnames, + const char* const * paramnames, const char *set, size_t cnt) { fobj_header_t *h; @@ -588,7 +590,7 @@ fobjBase_fobjKlass(fobj_t self) { return fobj_real_klass_of(self); } -static struct fobjStr* +static fobjStr* fobjBase_fobjRepr(VSelf) { Self(fobjBase); fobj_klass_handle_t klass = fobjKlass(self); @@ -616,43 +618,101 @@ fobj_err_combine(err_i fst, err_i scnd) { return fst; } +static fobjStr* +fobj_reservestr(size_t size) { + fobjStr *str; +#if __SIZEOF_POINTER__ < 8 + ft_assert(size < (1<<30)-2); +#else + ft_assert(size < UINT32_MAX-2); +#endif + if (size < FOBJ_STR_SMALL_SIZE) { + if (size < FOBJ_STR_FREE_SPACE) + str = fobj_alloc(fobjStr); + else { + size_t diff = size + 1 - FOBJ_STR_FREE_SPACE; + str = 
fobj_alloc_sized(fobjStr, diff); + } + str->small.type = FOBJ_STR_SMALL; + str->small.len = size; + str->small.buf[size] = '\0'; + } else { + str = fobj_alloc_sized(fobjStr, size + 1); + str->ptr.type = FOBJ_STR_UNOWNED; // abuse it because we don't need separate deallocation + str->ptr.len = size; + str->ptr.ptr = (char*)(str+1); + str->ptr.ptr[size] = '\0'; + } + return str; +} + fobjStr* -fobj_newstr(ft_str_t s, bool gifted) { +fobj_newstr(ft_str_t s, enum FOBJ_STR_ALLOC ownership) { fobjStr *str; +#if __SIZEOF_POINTER__ < 8 + ft_assert(size < (1<<30)-2); +#else ft_assert(s.len < UINT32_MAX-2); - if (!gifted) { - str = fobj_alloc_sized(fobjStr, s.len + 1, .len = s.len); - memcpy(str->_buf, s.ptr, s.len); - str->_buf[s.len] = '\0'; - str->ptr = str->_buf; - } else { - str = fobj_alloc(fobjStr, .len = s.len, .ptr = s.ptr); +#endif + if (s.len >= FOBJ_STR_FREE_SPACE && + (ownership == FOBJ_STR_GIFTED || ownership == FOBJ_STR_CONST)) { + str = fobj_alloc(fobjStr); + str->ptr.type = ownership == FOBJ_STR_GIFTED ? 
FOBJ_STR_PTR : FOBJ_STR_UNOWNED; + str->ptr.len = s.len; + str->ptr.ptr = s.ptr; + return str; } + str = fobj_reservestr(s.len); + memcpy(fobj_getstr(str).ptr, s.ptr, s.len); + if (ownership == FOBJ_STR_GIFTED) + ft_free(s.ptr); return str; } +ft_inline fobjStr* fobj_str_const(const char* s); + static void fobjStr_fobjDispose(VSelf) { Self(fobjStr); - if (self->ptr != self->_buf) { - ft_free((void*)self->ptr); + if (self->type == FOBJ_STR_PTR) { + ft_free(self->ptr.ptr); } } fobjStr* fobj_strcat(fobjStr *self, ft_str_t s) { fobjStr *newstr; - size_t alloc_len = self->len + s.len + 1; + ft_str_t news; + ft_str_t selfs = fobj_getstr(self); + size_t alloc_len = selfs.len + s.len + 1; ft_assert(alloc_len < UINT32_MAX-2); if (s.len == 0) - return $unref($ref(self)); + return self; - newstr = fobj_alloc_sized(fobjStr, alloc_len, .len = alloc_len-1); - memcpy(newstr->_buf, self->ptr, self->len); - memcpy(newstr->_buf + self->len, s.ptr, s.len); - newstr->_buf[newstr->len] = '\0'; - newstr->ptr = newstr->_buf; + newstr = fobj_reservestr(alloc_len-1); + news = fobj_getstr(newstr); + memcpy(news.ptr, selfs.ptr, selfs.len); + memcpy(news.ptr + selfs.len, s.ptr, s.len); + return newstr; +} + +fobjStr* +fobj_strcat2(fobjStr *self, ft_str_t s1, ft_str_t s2) { + fobjStr *newstr; + ft_str_t news; + ft_str_t selfs = fobj_getstr(self); + size_t alloc_len = selfs.len + s1.len + s2.len + 1; + ft_assert(alloc_len < UINT32_MAX-2); + + if (s1.len + s2.len == 0) + return self; + + newstr = fobj_reservestr(alloc_len-1); + news = fobj_getstr(newstr); + memcpy(news.ptr, selfs.ptr, selfs.len); + memcpy(news.ptr + selfs.len, s1.ptr, s1.len); + memcpy(news.ptr + selfs.len + s1.len, s2.ptr, s2.len); return newstr; } @@ -685,10 +745,10 @@ fobj_strcatf(fobjStr *ostr, const char *fmt, ...) { } /* empty print? 
*/ - if (buf.ptr == ostr->ptr) { - return $unref($ref(ostr)); + if (ft_strbuf_ref(&buf).ptr == fobj_getstr(ostr).ptr) { + return ostr; } - return fobj_newstr(ft_strbuf_steal(&buf), true); + return fobj_strbuf_steal(&buf); } fobjStr* @@ -700,6 +760,10 @@ fobj_tostr(fobj_t obj, const char *fmt) { return fobj_str(""); } + if (fobj_real_klass_of(obj) == fobjStr__kh() && (fmt == NULL || fmt[0] == '\0')) { + return obj; + } + if (!$ifdef(, fobjFormat, obj, &buf, fmt)) { /* fallback to Repr */ return $(fobjRepr, obj); @@ -1243,7 +1307,7 @@ fobj_printkv(const char *fmt, ft_slc_fokv_t kvs) { ft_strbuf_catc(&out, "NULL"); } else if (!$ifdef(, fobjFormat, kvs.ptr[i].value, &out, format)) { /* fallback to repr */ - ft_strbuf_cat(&out, fobj_getstr($repr(kvs.ptr[i].value))); + ft_strbuf_cat(&out, fobj_getstr(fobjRepr(kvs.ptr[i].value))); } cur = closebrace; } @@ -1276,7 +1340,7 @@ fobjBase__kh(void) { } fobj_klass_handle(fobjErr, mth(fobjRepr, _fobjErr_marker_DONT_IMPLEMENT_ME), varsized(kv)); -fobj_klass_handle(fobjStr, mth(fobjDispose), varsized(_buf)); +fobj_klass_handle(fobjStr, mth(fobjDispose), varsized()); fobj_klass_handle(fobjInt); fobj_klass_handle(fobjUInt); fobj_klass_handle(fobjFloat); diff --git a/src/fu_util/impl/fo_impl.h b/src/fu_util/impl/fo_impl.h index 9ef28dde7..88a6d8aec 100644 --- a/src/fu_util/impl/fo_impl.h +++ b/src/fu_util/impl/fo_impl.h @@ -8,22 +8,7 @@ typedef uint16_t fobj_klass_handle_t; typedef uint16_t fobj_method_handle_t; -/* Named argument handling tools */ -#if defined(__clang__) || defined(__clang_analyzer__) -#define fobj__push_ignore_initializer_overrides \ - _Pragma("clang diagnostic push"); \ - _Pragma("clang diagnostic ignored \"-Winitializer-overrides\"") -#define fobj__pop_ignore_initializer_overrides \ - _Pragma("clang diagnostic pop") -#else -#define fobj__push_ignore_initializer_overrides \ - _Pragma("GCC diagnostic push"); \ - _Pragma("GCC diagnostic ignored \"-Woverride-init-side-effects\"") -#define 
fobj__pop_ignore_initializer_overrides \ - _Pragma("GCC diagnostic pop") -#endif - -#ifndef NDEBUG +#define FOBJ_ARGS_COMPLEX typedef struct fobj__missing_argument_detector { char is_set; @@ -31,24 +16,16 @@ typedef struct fobj__missing_argument_detector { #define fobj__dumb_arg ((fobj__missing_argument_detector){1}) #define fobj__check_arg(name) fobj__nm_given(name).is_set -#else - -typedef struct fobj__missing_argument_detector { -} fobj__missing_argument_detector; -#define fobj__dumb_arg {} -#define fobj__check_arg(name) - -#endif - typedef struct { fobj_method_handle_t meth; void* impl; } fobj__method_impl_box_t; -/* param to tuple coversion */ +/* params coversions */ +/* map params to tuples */ #define fobj__map_params(...) \ - fm_eval(fm_foreach_comma(fobj__map_param, __VA_ARGS__)) + fm_eval_foreach_comma(fobj__map_param, __VA_ARGS__) #define fobj__map_params_(...) \ fm_foreach_comma(fobj__map_param, __VA_ARGS__) #define fobj__map_param(param) \ @@ -58,6 +35,25 @@ typedef struct { #define fobj__map_param_iface(...) (iface, __VA_ARGS__) #define fobj__map_param_inherits(parent) (inherits, parent) +/* filter and flatten methods */ +#define fobj__flat_methods(...) \ + fm_tail(fm_eval_tuples(fobj__fetch_methods, __VA_ARGS__)) +#define fobj__fetch_methods(tag, ...) fobj__fetch_methods_##tag(__VA_ARGS__) +#define fobj__fetch_methods_mth(...) , __VA_ARGS__ +#define fobj__fetch_methods_iface(...) +#define fobj__fetch_methods_inherits(...) +#define fobj__fetch_methods_varsized(...) + +/* filter and flatten interfaces */ +#define fobj__flat_ifaces(...) \ + fm_tail(fm_eval_tuples(fobj__fetch_ifaces, __VA_ARGS__)) +#define fobj__fetch_ifaces(tag, ...) \ + fobj__fetch_ifaces_##tag(__VA_ARGS__) +#define fobj__fetch_ifaces_mth(...) +#define fobj__fetch_ifaces_iface(...) , __VA_ARGS__ +#define fobj__fetch_ifaces_inherits(...) +#define fobj__fetch_ifaces_varsized(...) 
+ /* Standard naming */ #define fobj__nm_mth(meth) mth__##meth @@ -66,12 +62,12 @@ typedef struct { #define fobj__nm_iface(iface) iface__##iface #define fobj__nm_mhandle(meth) meth##__mh #define fobj__nm_params_t(meth) meth##__params_t -#define fobj__nm_invoke(meth) fobj__invoke_##meth +#define fobj__nm_invoke(meth) meth##__invoke #define fobj__nm_impl_t(meth) meth##__impl -#define fobj__nm_cb(meth) fetch_cb_##meth -#define fobj__nm_cb_t(meth) meth##__cb -#define fobj__nm_register(meth) fobj__register_##meth /* due to tcc bug, we can't use meth##__register */ -#define fobj__nm_wrap_decl(meth) fobj__wrap_decl_##meth +#define fobj__nm_cb(meth) meth##__fetch_cb +#define fobj__nm_cb_t(meth) meth##__cb_t +#define fobj__nm_register(meth) meth##__register +#define fobj__nm_wrap_decl(meth) meth##__wrap_decl #define fobj__nm_meth_i(meth) meth##_i #define fobj__nm_has(m) has_##m #define fobj__nm_bind(m_or_i) bind_##m_or_i @@ -85,13 +81,18 @@ typedef struct { /* Method definition */ #define fobj__predefine_method(method) \ - ft_inline ft_gcc_const fobj_method_handle_t fobj__nm_mhandle(method)(void) + ft_static ft_gcc_const fobj_method_handle_t fobj__nm_mhandle(method)(void) #define fobj__define_method(meth) \ - fobj__method_declare_i(meth, fobj__nm_mth(meth)) + fobj__method_declare_i(meth, fobj__nm_mth(meth)) \ + fobj__iface_declare_i(meth, fobj__map_params(mth(meth), iface__fobj)) \ + fm__dumb_require_semicolon +#define fobj__define_base_method(meth) \ + fobj__method_declare_i(meth, fobj__nm_mth(meth)) \ + fobj__iface_declare_i(meth, iface__fobj) \ + fm__dumb_require_semicolon #define fobj__method_declare_i(meth, ...) \ fobj__method_declare(meth, __VA_ARGS__) - #define fobj__method_declare(meth, res, ...) 
\ fobj__method_declare_impl(meth, \ fobj__nm_mhandle(meth), \ @@ -107,174 +108,136 @@ typedef struct { fobj__nm_bindref(meth), \ fobj__nm_implements(meth), \ fobj__nm_kvalidate(meth), \ - fm_va_comma(__VA_ARGS__), \ + fm_va_comma_fun(__VA_ARGS__), \ + res, __VA_ARGS__) + +#define fobj__special_method(meth) \ + fobj__special_method_declare_i(meth, fobj__nm_mth(meth)) \ + fm__dumb_require_semicolon +#define fobj__special_method_declare_i(meth, ...) \ + fobj__special_method_declare(meth, __VA_ARGS__) +#define fobj__special_method_declare(meth, res, ...) \ + fobj__method_common(meth, \ + fobj__nm_mhandle(meth), \ + fobj__nm_impl_t(meth), \ + fobj__nm_register(meth), \ + fobj__nm_wrap_decl(meth), \ + fm_va_comma_fun(__VA_ARGS__), \ res, __VA_ARGS__) #define fobj__method_declare_impl(meth, handle, \ - params_t, \ - invoke_methparams, \ - impl_meth_t, \ - cb_meth, cb_meth_t, \ - _register_meth, wrap_decl, \ - meth_i, bind_meth, bindref_meth, implements_meth, \ - kvalidate, comma, res, ...) \ - \ - ft_inline ft_gcc_const fobj_method_handle_t handle(void) { \ - static volatile fobj_method_handle_t hndl = 0; \ - fobj_method_handle_t h = hndl; \ - if (h) return h; \ - fobj_method_init_impl(&hndl, fm_str(meth)); \ - return hndl; \ - } \ - \ - typedef res (* impl_meth_t)(fobj_t self comma fobj__mapArgs_toArgs(__VA_ARGS__)); \ - \ - typedef struct params_t { \ - fobj__missing_argument_detector fobj__dumb_first_param; \ - fobj__mapArgs_toFields(__VA_ARGS__) \ - } params_t; \ - \ - typedef struct cb_meth_t { \ - fobj_t self; \ - impl_meth_t impl; \ - } cb_meth_t; \ - \ - ft_inline cb_meth_t \ - cb_meth(fobj_t self, fobj_klass_handle_t parent, bool validate) { \ - fobj__method_callback_t fnd = {NULL, NULL}; \ - fnd = fobj_method_search(self, handle(), parent, validate); \ - return (cb_meth_t){fnd.self, fnd.impl}; \ - } \ - \ - ft_inline res \ - meth(fobj_t self comma fobj__mapArgs_toArgs(__VA_ARGS__)) { \ - cb_meth_t cb = cb_meth(self, fobj_self_klass, true); \ - return 
cb.impl(cb.self comma fobj__mapArgs_toNames(__VA_ARGS__)); \ - } \ - \ - ft_inline void \ - _register_meth(fobj_klass_handle_t klass, impl_meth_t cb) { \ - fobj_method_register_impl(klass, handle(), (void *)cb); \ - } \ - \ - ft_inline fobj__method_impl_box_t \ - wrap_decl(impl_meth_t cb) { \ - return (fobj__method_impl_box_t) { handle(), cb }; \ - } \ - \ - typedef union meth_i { \ - fobj_t self; \ - uintptr_t fobj__nm_has(meth); \ - } meth_i;\ - \ - ft_inline meth_i \ - bind_meth(fobj_t self) { \ - ft_assert(fobj_method_implements(self, handle())); \ - return (meth_i){self}; \ - } \ - \ - ft_inline bool \ - implements_meth(fobj_t self, meth_i *ifacep) { \ - bool has = fobj_method_implements(self, handle()); \ - if (ifacep != NULL) \ - ifacep->self = has ? self : NULL; \ - return has; \ - } \ - \ - ft_inline meth_i \ - bindref_meth(fobj_t self) { \ - meth_i _iface = bind_meth(self); \ - fobj_ref(_iface.self); \ - return _iface; \ - } \ - \ - ft_inline void \ - kvalidate(fobj_klass_handle_t khandle) { \ - ft_assert(fobj_klass_method_search(khandle, handle()) != NULL); \ - } \ - \ - ft_inline res \ - invoke_methparams(cb_meth_t cb, params_t params) {\ - if (!(fobj__assertArgsAnd(__VA_ARGS__))) { \ - const char *params_s[] = { fobj__mapArgs_toNameStrs(__VA_ARGS__) }; \ - char set[] = {fobj__assertArgsVals(__VA_ARGS__)}; \ - fobj__validate_args(handle(), cb.self, params_s, set, ft_arrsz(params_s)); \ - } \ - return cb.impl(cb.self comma fobj__mapArgs_toNamedParams(__VA_ARGS__)); \ - } \ - \ - fm__dumb_require_semicolon + params_t, \ + invoke_methparams, \ + impl_meth_t, \ + cb_meth, cb_meth_t, \ + register_meth, wrap_decl, \ + meth_i, bind_meth, bindref_meth, \ + implements_meth, \ + kvalidate, comma, res, ...) 
\ + \ + fobj__method_common(meth, handle, impl_meth_t, register_meth, \ + wrap_decl, comma, res, __VA_ARGS__) \ + \ + typedef struct params_t { \ + fobj__mapArgs_toFields(__VA_ARGS__) \ + } params_t; \ + \ + typedef struct cb_meth_t { \ + fobj_t self; \ + impl_meth_t impl; \ + } cb_meth_t; \ + \ + ft_inline ft_always_inline cb_meth_t \ + cb_meth(fobj_t self, fobj_klass_handle_t parent, bool validate) { \ + fobj__method_callback_t fnd = {NULL, NULL}; \ + fnd = fobj_method_search(self, handle(), parent, validate); \ + return (cb_meth_t){fnd.self, fnd.impl}; \ + } \ + \ + ft_inline ft_always_inline res \ + meth(fobj_t self comma() fobj__mapArgs_toArgs(__VA_ARGS__)) { \ + cb_meth_t cb = cb_meth(self, fobj_self_klass, true); \ + return cb.impl(cb.self comma() fobj__mapArgs_toNames(__VA_ARGS__)); \ + } \ + \ + ft_inline ft_always_inline res \ + invoke_methparams(cb_meth_t cb, params_t params) {\ + fobj__params_defaults(meth); \ + fm_when_isnt_empty(__VA_ARGS__)( \ + if (ft_unlikely(!(fobj__assertArgsAnd(__VA_ARGS__)))) { \ + const char * const params_s[] = { fobj__mapArgs_toNameStrs(__VA_ARGS__) }; \ + char set[] = {fobj__assertArgsVals(__VA_ARGS__)}; \ + fobj__validate_args(handle(), cb.self, params_s, set, ft_arrsz(set)); \ + } ) \ + return cb.impl(cb.self comma() fobj__mapArgs_toNamedParams(__VA_ARGS__)); \ + } \ + +#define fobj__method_common(meth, handle, impl_meth_t, register_meth, \ + wrap_decl, comma, res, ...) 
\ + \ + ft_static ft_gcc_const fobj_method_handle_t handle(void) { \ + static volatile fobj_method_handle_t hndl = 0; \ + fobj_method_handle_t h = hndl; \ + if (ft_likely(h)) return h; \ + fobj_method_init_impl(&hndl, fm_str(meth)); \ + return hndl; \ + } \ + \ + typedef res (* impl_meth_t)(fobj_t self comma() fobj__mapArgs_toArgs(__VA_ARGS__)); \ + \ + ft_inline void \ + register_meth(fobj_klass_handle_t klass, impl_meth_t cb) { \ + fobj_method_register_impl(klass, handle(), (void *)cb); \ + } \ + \ + ft_inline fobj__method_impl_box_t \ + wrap_decl(impl_meth_t cb) { \ + return (fobj__method_impl_box_t) { handle(), cb }; \ + } -#define fobj__mapArgs_toArgs_do(x, y, ...) x y #define fobj__mapArgs_toArgs(...) \ - fm_eval(fm_foreach_tuple_comma(fobj__mapArgs_toArgs_do, __VA_ARGS__)) + fm_eval_tuples_comma(fobj__mapArgs_toArgs_do, __VA_ARGS__) +#define fobj__mapArgs_toArgs_do(x, y, ...) x y -#ifndef NDEBUG +#define fobj__mapArgs_toFields(...) \ + fm_eval_tuples(fobj__mapArgs_toFields_do, __VA_ARGS__) #define fobj__mapArgs_toFields_do(x, y, ...) \ x y; \ fobj__missing_argument_detector fobj__nm_given(y); -#else -#define fobj__mapArgs_toFields_do(x, y, ...) \ - x y; -#endif -#define fobj__mapArgs_toFields(...) \ - fm_eval(fm_foreach_tuple(fobj__mapArgs_toFields_do, __VA_ARGS__)) -#define fobj__mapArgs_toNames_do(x, y, ...) y #define fobj__mapArgs_toNames(...) \ - fm_eval(fm_foreach_tuple_comma(fobj__mapArgs_toNames_do, __VA_ARGS__)) + fm_eval_tuples_comma(fobj__mapArgs_toNames_do, __VA_ARGS__) +#define fobj__mapArgs_toNames_do(x, y, ...) y -#define fobj__mapArgs_toNameStrs_do(x, y, ...) #y #define fobj__mapArgs_toNameStrs(...) \ - fm_eval(fm_foreach_tuple_comma(fobj__mapArgs_toNameStrs_do, __VA_ARGS__)) + fm_eval_tuples_comma(fobj__mapArgs_toNameStrs_do, __VA_ARGS__) +#define fobj__mapArgs_toNameStrs_do(x, y, ...) #y -#define fobj__mapArgs_toNamedParams_do(x, y, ...) params.y #define fobj__mapArgs_toNamedParams(...) 
\ - fm_eval(fm_foreach_tuple_comma(fobj__mapArgs_toNamedParams_do, __VA_ARGS__)) + fm_eval_tuples_comma(fobj__mapArgs_toNamedParams_do, __VA_ARGS__) +#define fobj__mapArgs_toNamedParams_do(x, y, ...) params.y -#ifndef NDEBUG -#define fobj__assertArgsAnd_do(x, y, ...) & fobj__check_arg(params.y) #define fobj__assertArgsAnd(...) \ - 1 fm_eval(fm_foreach_tuple(fobj__assertArgsAnd_do, __VA_ARGS__)) -#else -#define fobj__assertArgsAnd(...) 1 -#endif + 1 fm_eval_tuples(fobj__assertArgsAnd_do, __VA_ARGS__) +#define fobj__assertArgsAnd_do(x, y, ...) & fobj__check_arg(params.y) -#ifndef NDEBUG -#define fobj__assertArgsVals_do(x, y, ...) fobj__check_arg(params.y) #define fobj__assertArgsVals(...) \ - fm_eval(fm_foreach_tuple_comma(fobj__assertArgsVals_do, __VA_ARGS__)) -#else -#define fobj__assertArgsVals(...) -#endif + fm_eval_tuples_comma(fobj__assertArgsVals_do, __VA_ARGS__) +#define fobj__assertArgsVals_do(x, y, ...) fobj__check_arg(params.y) -#define fobj__assertArgs_do(x, y, ...) fobj__check_arg(params.y) -#define fobj__assertArgs(...) 
\ - fm_eval(fm_foreach_tuple(fobj__assertArgs_do, __VA_ARGS__)) - -#define fobj__special_void_method(meth) \ - \ - ft_inline ft_gcc_const fobj_method_handle_t fobj__nm_mhandle(meth) (void) { \ - static volatile fobj_method_handle_t hndl = 0; \ - fobj_method_handle_t h = hndl; \ - if (h) return h; \ - fobj_method_init_impl(&hndl, fm_str(meth)); \ - return hndl; \ - } \ - \ - typedef void (* fobj__nm_impl_t(meth))(fobj_t self); \ - \ - ft_inline void \ - fobj__nm_register(meth)(fobj_klass_handle_t klass, fobj__nm_impl_t(meth) cb) { \ - fobj_method_register_impl(klass, fobj__nm_mhandle(meth)(), (void *)cb); \ - } \ - \ - ft_inline fobj__method_impl_box_t \ - fobj__nm_wrap_decl(meth)(fobj__nm_impl_t(meth) cb) { \ - return (fobj__method_impl_box_t) { fobj__nm_mhandle(meth)(), cb }; \ - } \ - \ - fm__dumb_require_semicolon +#define fobj__params_defaults(meth) \ + fobj__params_defaults_i(meth, fobj__nm_mthdflt(meth)()) +#define fobj__params_defaults_i(meth, ...) \ + fm_when(fm_is_tuple(fm_head(__VA_ARGS__))) ( \ + fobj__params_defaults_impl(__VA_ARGS__) \ + ) +#define fobj__params_defaults_impl(...) \ + fm_eval_tuples(fobj__params_defaults_each, __VA_ARGS__) +#define fobj__params_defaults_each(x, ...) \ + if (!fobj__check_arg(params.x)) { \ + fm_when_isnt_empty(__VA_ARGS__)( params.x = __VA_ARGS__; ) \ + params.fobj__nm_given(x) = fobj__dumb_arg; \ + } /* Klass declarations */ @@ -282,11 +245,9 @@ typedef struct { extern fobj_klass_handle_t fobj__nm_khandle(klass)(void) ft_gcc_const; \ fm__dumb_require_semicolon - #define fobj__klass_handle(klass, ...) \ fobj__klass_handle_i(klass, \ - fobj__map_params(fobj__nm_kls(klass)) \ - fm_va_comma(__VA_ARGS__) fobj__map_params(__VA_ARGS__)) + fobj__map_params(fobj__nm_kls(klass) fm_when_isnt_empty(__VA_ARGS__)(fm__comma __VA_ARGS__))) #define fobj__klass_handle_i(klass, ...) \ fobj__klass_handle_impl(klass, __VA_ARGS__) #define fobj__klass_handle_impl(klass, ...) 
\ @@ -306,85 +267,44 @@ typedef struct { return hndl; \ } \ khandle = hndl; \ - fm_when(fm_isnt_empty(fobj__klass_has_iface(__VA_ARGS__))) ( \ - fobj__klass_check_iface(klass, __VA_ARGS__) \ - ) \ + fobj__klass_check_ifaces(klass, __VA_ARGS__) \ return khandle; \ - } \ - fm__dumb_require_semicolon + } -#define fobj__klass_detect_size_varsized_1(klass, fld, ...) \ - kls_size = -1-offsetof(klass,fld); -#define fobj__klass_detect_size_varsized_0(klass, ...) \ - kls_size = -1-sizeof(klass); -#define fobj__klass_detect_size_varsized(klass, ...) \ - fm_cat(fobj__klass_detect_size_varsized_, fm_va_01(__VA_ARGS__))(klass, __VA_ARGS__) -#define fobj__klass_detect_size_mth(...) +#define fobj__klass_detect_size(klass, tag, ...) \ + fobj__klass_detect_size_##tag (klass, __VA_ARGS__) #define fobj__klass_detect_size_inherits(klass, parent) \ kparent = fobj__nm_khandle(parent)(); +#define fobj__klass_detect_size_varsized(klass, ...) \ + fm_iif(fm_va_01(__VA_ARGS__)) \ + ( kls_size = -1-offsetof(klass,fm_head(__VA_ARGS__)); ) \ + ( kls_size = -1-sizeof(klass); ) +#define fobj__klass_detect_size_mth(...) #define fobj__klass_detect_size_iface(...) -#define fobj__klass_detect_size(klass, tag, ...) \ - fobj__klass_detect_size_##tag (klass, __VA_ARGS__) + +#define fobj__klass_decl_methods(klass, ...) \ + fm_eval_foreach_arg(fobj__klass_decl_method, klass, fobj__flat_methods(__VA_ARGS__)) +#define fobj__klass_decl_method(klass, meth) \ + fobj__nm_wrap_decl(meth)(fobj__nm_klass_meth(klass, meth)), + +#define fobj__klass_check_ifaces(klass, ...) \ + fm_eval_foreach_arg(fobj__klass_check_iface, klass, fobj__flat_ifaces(__VA_ARGS__)) +#define fobj__klass_check_iface(klass, iface) \ + fobj__nm_kvalidate(iface)(khandle); #define fobj__method_init(meth) \ fobj__consume(fobj__nm_mhandle(meth)()) #define fobj__klass_init(klass) \ fobj__consume(fobj__nm_khandle(klass)()) -#define fobj__klass_decl_method(klass, meth, ...) 
\ - fobj__nm_wrap_decl(meth)(fobj__nm_klass_meth(klass, meth)), -#define fobj__klass_decl_method_loop(klass, ...) \ - fm_foreach_arg(fobj__klass_decl_method, klass, __VA_ARGS__) - -#define fobj__klass_decl_methods_mth(klass, ...) \ - fm_recurs(fobj__klass_decl_method_loop)(klass, __VA_ARGS__) -#define fobj__klass_decl_methods_varsized(...) -#define fobj__klass_decl_methods_inherits(klass, parent) -#define fobj__klass_decl_methods_iface(...) -#define fobj__klass_decl_methods_dispatch(klass, tag, ...) \ - fobj__klass_decl_methods_##tag(klass, __VA_ARGS__) -#define fobj__klass_decl_methods(klass, ...) \ - fm_eval(fm_foreach_tuple_arg(\ - fobj__klass_decl_methods_dispatch, klass, __VA_ARGS__)) - -#define fobj__klass_has_iface_varsized -#define fobj__klass_has_iface_mth -#define fobj__klass_has_iface_inherits -#define fobj__klass_has_iface_iface 1 -#define fobj__klass_has_iface_impl(tag, ...) \ - fobj__klass_has_iface_##tag -#define fobj__klass_has_iface(...) \ - fm_eval_tuples(fobj__klass_has_iface_impl, __VA_ARGS__) - -#define fobj__klass_check_dispatch_varsized(...) -#define fobj__klass_check_dispatch_mth(...) -#define fobj__klass_check_dispatch_inherits(...) -#define fobj__klass_check_dispatch_iface(klass, ...) \ - fm_recurs(fobj__klass_check_dispatch_iface_i)(klass, __VA_ARGS__) -#define fobj__klass_check_dispatch_iface_i(klass, ...) \ - fm_foreach_arg(fobj__klass_check_one_iface, klass, __VA_ARGS__) -#define fobj__klass_check_one_iface(klass, iface) \ - fobj__nm_kvalidate(iface)(khandle); -#define fobj__klass_check_dispatch(klass, tag, ...) \ - fobj__klass_check_dispatch_##tag(klass, __VA_ARGS__) -#define fobj__klass_check_iface(klass, ...) \ - fm_eval_tuples_arg(fobj__klass_check_dispatch, klass, __VA_ARGS__) - -#define fobj__add_methods_loop(klass, ...) \ - fm_foreach_arg(fobj__add_methods_do, klass, __VA_ARGS__) -#define fobj__add_methods_do(klass, meth, ...) \ - fm_recurs(fobj__add_methods_do_)(klass, meth, ...) -#define fobj__add_methods_do_(klass, meth, ...) 
\ - fobj__nm_register(meth)(\ - fobj__nm_khandle(klass)(), \ - fobj__nm_klass_meth(klass, meth)); - /* add methods after class declaration */ #define fobj__add_methods(klass, ...) do { \ fobj_klass_handle_t khandle = fobj__nm_khandle(klass)(); \ - fm_eval(fobj__add_methods_loop(klass, __VA_ARGS__)) \ + fm_eval_foreach_arg(fobj__add_methods_do, klass, __VA_ARGS__) \ } while (0) +#define fobj__add_methods_do(klass, meth) \ + fobj__nm_register(meth)(khandle, fobj__nm_klass_meth(klass, meth)); /* Instance creation */ #define fobj__alloc(klass, ...) \ @@ -400,40 +320,42 @@ typedef struct { /* Interface declaration */ #define fobj__iface_declare(iface) \ - fobj__iface_declare_i(iface, fobj__map_params(fobj__nm_iface(iface))) + fobj__iface_declare_i(iface, fobj__map_params(fobj__nm_iface(iface), iface__fobj)) \ + fm__dumb_require_semicolon + #define fobj__iface_declare_i(iface, ...) \ fobj__iface_declare_impl(iface, \ fobj__nm_iface_i(iface), fobj__nm_bind(iface), \ fobj__nm_bindref(iface), fobj__nm_implements(iface), \ - fobj__nm_kvalidate(iface), __VA_ARGS__) + fobj__nm_kvalidate(iface), (fobj__flat_methods(__VA_ARGS__))) #define fobj__iface_declare_impl(iface, iface_i, \ bind_iface, bindref_iface, implements_iface, \ - kvalidate, ...) 
\ - fobj__mapMethods_toHandlers(__VA_ARGS__) \ + kvalidate, methods) \ + fobj__mapMethods_toHandlers methods \ typedef union iface_i { \ fobj_t self; \ - fobj__mapMethods_toFields(__VA_ARGS__) \ + fobj__mapMethods_toFields methods \ } iface_i; \ \ - static ft_unused inline iface_i \ + ft_inline iface_i \ bind_iface(fobj_t self) { \ iface_i _iface = (iface_i){ .self = self }; \ - fobj__mapMethods_toSetters(__VA_ARGS__) \ + fobj__mapMethods_toSetters methods \ return _iface; \ } \ \ - static ft_unused inline bool \ + ft_inline bool \ implements_iface(fobj_t self, iface_i *ifacep) { \ iface_i _iface = (iface_i){ .self = self }; \ bool all_ok = true; \ - fobj__mapMethods_toIfSetters(__VA_ARGS__) \ + fobj__mapMethods_toIfSetters methods \ if (ifacep != NULL) \ *ifacep = all_ok ? _iface : (iface_i){NULL}; \ return all_ok; \ } \ \ - static ft_unused inline iface_i \ + ft_inline iface_i \ bindref_iface(fobj_t self) { \ iface_i _iface = bind_iface(self); \ fobj_ref(_iface.self); \ @@ -442,10 +364,33 @@ typedef struct { \ ft_inline void \ kvalidate(fobj_klass_handle_t khandle) { \ - fobj__kvalidateMethods(__VA_ARGS__) \ - } \ - \ - fm__dumb_require_semicolon + fobj__kvalidateMethods methods \ + } + +#define fobj__mapMethods_toHandlers(...) \ + fm_eval_foreach(fobj__mapMethods_toHandlers_do, __VA_ARGS__) +#define fobj__mapMethods_toHandlers_do(m) \ + fobj__predefine_method(m); + +#define fobj__mapMethods_toFields(...) \ + fm_eval_foreach(fobj__mapMethods_toFields_do, __VA_ARGS__) +#define fobj__mapMethods_toFields_do(m) \ + uintptr_t fobj__nm_has(m); + +#define fobj__mapMethods_toSetters(...) \ + fm_eval_foreach(fobj__mapMethods_toSetters_do, __VA_ARGS__) +#define fobj__mapMethods_toSetters_do(meth) \ + ft_assert(fobj_method_implements(self, fobj__nm_mhandle(meth)())); + +#define fobj__mapMethods_toIfSetters(...) 
\ + fm_eval_foreach(fobj__mapMethods_toIfSetters_do, __VA_ARGS__) +#define fobj__mapMethods_toIfSetters_do(meth) \ + all_ok &= fobj_method_implements(self, fobj__nm_mhandle(meth)()); + +#define fobj__kvalidateMethods(...) \ + fm_eval_foreach(fobj__kvalidateMethods_do, __VA_ARGS__) +#define fobj__kvalidateMethods_do(meth) \ + ft_assert(fobj_klass_method_search(khandle, fobj__nm_mhandle(meth)()) != NULL); #ifndef NDEBUG #define fobj_reduce(newifacetype, oldiface) ({ \ @@ -461,74 +406,23 @@ typedef struct { ((fobj__nm_iface_i(newifacetype)){.self = (oldiface).self}) #endif -#define fobj__mapMethods_toCopyChecks_do_mth(meth) \ - _new_iface_.fobj__nm_has(meth) = _old_iface_.fobj__nm_has(meth); -#define fobj__mapMethods_toCopyChecks_loop(tag, ...) \ - fm_foreach(fobj__mapMethods_toCopyChecks_do_##tag, __VA_ARGS__) -#define fobj__mapMethods_toCopyChecks_do(tag, ...) \ - fm_recurs(fobj__mapMethods_toCopyChecks_loop)(tag, __VA_ARGS__) -#define fobj__mapMethods_toCopyChecks_i(...) \ - fm_foreach_tuple(fobj__mapMethods_toCopyChecks_do, __VA_ARGS__) -#define fobj__mapMethods_toCopyChecks_i1(iface, def) \ - fm_eval(fobj__mapMethods_toCopyChecks_i(def)) #define fobj__mapMethods_toCopyChecks(iface) \ - fobj__mapMethods_toCopyChecks_i1(iface, \ - fm_expand fm_if(fobj__macroIsIface(iface), \ - (fobj__map_params(fobj__nm_iface(iface))), \ - ((mth, iface)))) + fobj__mapMethods_toCopyChecks_i( \ + fm_iif(fobj__macroIsIface(iface)) \ + (fobj__map_params(fobj__nm_iface(iface))) \ + ((mth, iface))) +#define fobj__mapMethods_toCopyChecks_i(...) \ + fm_eval_foreach(fobj__mapMethods_toCopyChecks_do, fobj__flat_methods(__VA_ARGS__)) +#define fobj__mapMethods_toCopyChecks_do(meth) \ + _new_iface_.fobj__nm_has(meth) = _old_iface_.fobj__nm_has(meth); #define fobj__macroIsIface(iface) \ - fm_is_empty(fm_eval(fobj__macroIsIface_i(fobj__nm_iface(iface)))) -#define fobj__macroIsIface_mth(...) + fm_is_empty(fobj__macroIsIface_i(fobj__nm_iface(iface))) +#define fobj__macroIsIface_i(...) 
\ + fm_eval_foreach(fobj__macroIsIface_do, __VA_ARGS__) #define fobj__macroIsIface_do(x) \ fobj__macroIsIface_##x -#define fobj__macroIsIface_i(...) \ - fm_foreach(fobj__macroIsIface_do, __VA_ARGS__) - -#define fobj__mapMethods_toHandlers_do_do(m) \ - fobj__predefine_method(m); -#define fobj__mapMethods_toHandlers_loop(...) \ - fm_foreach(fobj__mapMethods_toHandlers_do_do, __VA_ARGS__) -#define fobj__mapMethods_toHandlers_do(tag, ...) \ - fm_recurs(fobj__mapMethods_toHandlers_loop)(__VA_ARGS__) -#define fobj__mapMethods_toHandlers(...) \ - fm_eval_tuples(fobj__mapMethods_toHandlers_do, __VA_ARGS__) - - -#define fobj__mapMethods_toFields_do_do(m) uintptr_t fobj__nm_has(m); -#define fobj__mapMethods_toFields_loop(...) \ - fm_foreach(fobj__mapMethods_toFields_do_do, __VA_ARGS__) -#define fobj__mapMethods_toFields_do(tag, ...) \ - fm_recurs(fobj__mapMethods_toFields_loop)(__VA_ARGS__) -#define fobj__mapMethods_toFields(...) \ - fm_eval_tuples(fobj__mapMethods_toFields_do, __VA_ARGS__) - -#define fobj__mapMethods_toSetters_do_mth(meth) \ - ft_assert(fobj_method_implements(self, fobj__nm_mhandle(meth)())); -#define fobj__mapMethods_toSetters_loop(tag, ...) \ - fm_foreach(fobj__mapMethods_toSetters_do_##tag, __VA_ARGS__) -#define fobj__mapMethods_toSetters_do(tag, ...) \ - fm_recurs(fobj__mapMethods_toSetters_loop)(tag, __VA_ARGS__) -#define fobj__mapMethods_toSetters(...) \ - fm_eval_tuples(fobj__mapMethods_toSetters_do, __VA_ARGS__) - -#define fobj__mapMethods_toIfSetters_do_mth(meth) \ - if (!fobj_method_implements(self, fobj__nm_mhandle(meth)())) all_ok = false; -#define fobj__mapMethods_toIfSetters_loop(tag, ...) \ - fm_foreach(fobj__mapMethods_toIfSetters_do_##tag, __VA_ARGS__) -#define fobj__mapMethods_toIfSetters_do(tag, ...) \ - fm_recurs(fobj__mapMethods_toIfSetters_loop)(tag, __VA_ARGS__) -#define fobj__mapMethods_toIfSetters(...) 
\ - fm_eval_tuples(fobj__mapMethods_toIfSetters_do, __VA_ARGS__) - -#define fobj__kvalidateMethods_do_mth(meth) \ - ft_assert(fobj_klass_method_search(khandle, fobj__nm_mhandle(meth)()) != NULL); -#define fobj__kvalidateMethods_loop(tag, ...) \ - fm_foreach(fobj__kvalidateMethods_do_##tag, __VA_ARGS__) -#define fobj__kvalidateMethods_do(tag, ...) \ - fm_recurs(fobj__kvalidateMethods_loop)(tag, __VA_ARGS__) -#define fobj__kvalidateMethods(...) \ - fm_eval_tuples(fobj__kvalidateMethods_do, __VA_ARGS__) +#define fobj__macroIsIface_mth(...) /* Method invocation */ @@ -566,54 +460,10 @@ typedef struct { /* Named params passing hazzles with optional and defaults */ #define fobj_pass_params(meth, ...) \ - fm_cat(fobj__pass_params_impl_, fm_no_va(__VA_ARGS__))( \ - meth, fobj__nm_params_t(meth), __VA_ARGS__) -#define fobj__pass_params_impl_1(meth, meth__params_t, ...) \ - ((meth__params_t){fobj__params_defaults(meth)}) -#if !defined(NDEBUG) && !defined(__TINYC__) -#define fobj__pass_params_impl_0(meth, meth__params_t, ...) \ - ({ \ - fobj__push_ignore_initializer_overrides; \ - (meth__params_t) { \ - fobj__params_defaults(meth), \ - fm_eval(fm_foreach_comma(fobj__pass_params_each, __VA_ARGS__)) \ - }; \ - fobj__pop_ignore_initializer_overrides; \ - }) -#else -#define fobj__pass_params_impl_0(meth, meth__params_t, ...) \ - ((meth__params_t){\ - fobj__params_defaults(meth), \ - fm_eval(fm_foreach_comma(fobj__pass_params_each, __VA_ARGS__)) \ - }) -#endif + ((fobj__nm_params_t(meth)){fm_eval_foreach_comma(fobj__pass_params_each, __VA_ARGS__)}) -#ifndef NDEBUG #define fobj__pass_params_each(param) \ param, fobj__dumb_arg -#else -#define fobj__pass_params_each(param) \ - param -#endif - -#define fobj__params_defaults(meth) \ - fobj__params_defaults_i(meth, fobj__nm_mthdflt(meth)()) \ - .fobj__dumb_first_param = fobj__dumb_arg -#define fobj__params_defaults_i(meth, ...) 
\ - fm_when(fm_is_tuple(fm_head(__VA_ARGS__))) ( \ - fobj__params_defaults_impl(__VA_ARGS__) \ - ) -#define fobj__params_defaults_impl(...) \ - fm_eval(fm_foreach_tuple(fobj__params_defaults_each, __VA_ARGS__)) -#ifndef NDEBUG -#define fobj__params_defaults_each(x, ...) \ - fm_when(fm_isnt_empty(__VA_ARGS__))( .x = __VA_ARGS__, )\ - .fobj__nm_given(x) = fobj__dumb_arg, -#else -#define fobj__params_defaults_each(x, ...) \ - fm_when(fm_isnt_empty(__VA_ARGS__))( .x = __VA_ARGS__, ) -#endif - #define fobj_bind(iface, obj) fobj__nm_bind(iface)(obj) @@ -651,7 +501,7 @@ extern void* fobj_klass_method_search(fobj_klass_handle_t klass, fobj_method_handle_t meth); extern void fobj__validate_args(fobj_method_handle_t meth, fobj_t self, - const char** paramnames, const char *set, size_t cnt); + const char* const * paramnames, const char *set, size_t cnt); /* Variable set helpers */ diff --git a/src/fu_util/impl/fo_impl2.h b/src/fu_util/impl/fo_impl2.h index 1d42acc18..916714997 100644 --- a/src/fu_util/impl/fo_impl2.h +++ b/src/fu_util/impl/fo_impl2.h @@ -6,21 +6,58 @@ #include #include +enum fobjStrType { + FOBJ_STR_SMALL = 1, + FOBJ_STR_UNOWNED, + FOBJ_STR_PTR, +}; +#define FOBJ_STR_SMALL_SIZE ((1<<14)-1) +#define FOBJ_STR_FREE_SPACE (sizeof(fobjStr) - offsetof(fobjStr, small.buf)) + +union fobjStr { + struct { + uint16_t type:2; + }; + struct { + uint16_t type:2; + uint16_t len:14; + char buf[]; + } small; + struct { + uint16_t type:2; + uint32_t len; + char* ptr; + } ptr; +}; + ft_inline fobjStr* fobj_str(const char* s) { - return fobj_newstr(ft_cstr(s), false); + return fobj_newstr(ft_cstr(s), FOBJ_STR_COPY); +} + +ft_inline fobjStr* +fobj_str_const(const char* s) { + return fobj_newstr(ft_cstr(s), FOBJ_STR_CONST); } ft_inline fobjStr* fobj_strbuf_steal(ft_strbuf_t *buf) { - fobjStr* str = fobj_newstr(ft_strbuf_ref(buf), buf->alloced); - *buf = (ft_strbuf_t){NULL}; - return str; + if (buf->len < FOBJ_STR_FREE_SPACE && !buf->alloced) + return 
fobj_newstr(ft_strbuf_ref(buf), FOBJ_STR_COPY); + return fobj_newstr(ft_strbuf_steal(buf), FOBJ_STR_GIFTED); } ft_inline ft_str_t fobj_getstr(fobjStr *str) { - return ft_str(str->ptr, str->len); + switch (str->type) { + case FOBJ_STR_SMALL: + return ft_str(str->small.buf, str->small.len); + case FOBJ_STR_PTR: + case FOBJ_STR_UNOWNED: + return ft_str(str->ptr.ptr, str->ptr.len); + default: + ft_log(FT_FATAL, "Unknown fobj_str type %d", str->type); + } } ft_inline fobjStr* @@ -31,7 +68,7 @@ fobj_strcatc(fobjStr *ostr, const char *str) { ft_inline fobjStr* fobj_strcatc2(fobjStr *ostr, const char *str1, const char *str2) { /* a bit lazy to do it in a fast way */ - return fobj_strcatf(ostr, "%s%s", str1, str2); + return fobj_strcat2(ostr, ft_cstr(str1), ft_cstr(str2)); } ft_inline fobjStr* @@ -94,7 +131,7 @@ struct fobjErr { }; #define fobj_make_err(type, ...) \ - fm_cat(fobj_make_err_, fm_va_012(__VA_ARGS__))(type, __VA_ARGS__) + fm_cat(fobj_make_err_, fm_va_01n(__VA_ARGS__))(type, __VA_ARGS__) #define fobj_make_err_0(type, ...) ({ \ fobj__make_err(fobj_error_kind_##type(), \ ft__srcpos(), "Unspecified Error", NULL, 0); \ @@ -103,7 +140,7 @@ struct fobjErr { fobj__make_err(fobj_error_kind_##type(), \ ft__srcpos(), msg, NULL, 0); \ }) -#define fobj_make_err_2(type, msg, ...) ({ \ +#define fobj_make_err_n(type, msg, ...) ({ \ fobj_err_kv_t kvs[] = { \ fobj__err_transform_kv(__VA_ARGS__) \ }; \ @@ -182,7 +219,13 @@ fobj_errsrc(err_i err) { return self->src; } -#define fobj__printkv(fmt, ...) ({ \ +#define fobj__printkv(fmt, ...) \ + fm_cat(fobj__printkv_, fm_va_01(__VA_ARGS__))(fmt, __VA_ARGS__) + +#define fobj__printkv_0(fmt, ...) \ + fobj_printkv(fmt, ft_slc_fokv_make(NULL, 0)) + +#define fobj__printkv_1(fmt, ...) 
({ \ fobj_kv kvs[] = { \ fobj__transform_fokv(__VA_ARGS__) \ }; \ diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index b37e386f8..752da826f 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -84,7 +84,7 @@ extern void ft__register_source(const char *file, } while(0) #define ft__log_fmt_msg(fmt, ...) \ - fm_tuple_expand(fm_if(fm_no_va(__VA_ARGS__), ("%s", fmt), (fmt, __VA_ARGS__))) + fm_iif(fm_no_va(__VA_ARGS__))("%s", fmt)(fmt, __VA_ARGS__) extern ft_gnu_printf(4, 5) void ft__log(enum FT_LOG_LEVEL level, ft_source_position_t srcpos, const char* error, const char *fmt, ...); @@ -226,6 +226,17 @@ ft_inline uint32_t ft_randn(uint32_t mod) { return ft_fast_randmod(ft_rand(), mod); } +ft_inline uint32_t ft_rand32(uint32_t* state, uint32_t mod) { + uint32_t x = *state; + uint32_t r = ft_rol32(x, 15); + x ^= x << 13; + x ^= x >> 17; + x ^= x << 5; + *state = x; + r += x; + return mod ? ft_fast_randmod(r, mod) : r; +} + /* ft_val_t */ struct ft_arg { union { diff --git a/src/fu_util/test/obj1.c b/src/fu_util/test/obj1.c index 432f12761..db5f9b0b0 100644 --- a/src/fu_util/test/obj1.c +++ b/src/fu_util/test/obj1.c @@ -25,12 +25,11 @@ fobj_iface(ioReadCloser); fobj_iface(ioReader); fobj_iface(obj); - #define kls__Klass0 mth(fobjDispose), \ iface__ioReader, mth(fobjGetError) #define kls__KlassA inherits(Klass0), \ iface__ioReadCloser, \ - mth(ioStatus), iface(ioReadCloser, ioReader) + mth(ioStatus), iface(ioReadCloser, ioReader, ioRead) fobj_klass(Klass0); fobj_klass(KlassA); @@ -39,7 +38,6 @@ typedef struct Klass0 { int x; } Klass0; - typedef struct KlassA { Klass0 p; size_t offset; @@ -65,7 +63,7 @@ fobj_error_float_key(myy); static err_i Klass0_fobjGetError(VSelf) { Self(Klass0); - return $err(RT, "WTF ERROR {myx:05d} {myy:9.4f}", (myx, self->x), (myy, 100.001)); + return $err(RT, "WTF ERROR {myx:05d} {myy:9.4f}", myx(self->x), myy(100.001)); } static int @@ -118,15 +116,18 @@ int main(int argc, char** argv) { if (verbose) { 
//ft_log_level_reset(FT_LOG); ft_log_level_set(__FILE__, FT_DEBUG); + } else { + ft_log_level_set("ALL", FT_ERROR); } fobj_klass_init(Klass0); fobj_klass_init(KlassA); + fobj_add_methods(KlassA, ioRead); fobj_freeze(); KlassA *a = $alloc(KlassA, .offset = 1, .p.x = 2); - logf("a=%s", fobjRepr(a)->ptr); + logf("a=%s", $repr(a)); logf("Before block 1 enter"); { @@ -209,30 +210,28 @@ int main(int argc, char** argv) { err = $(fobjGetError, a); logf("Error: %s", $errmsg(err)); - logf("Error: %s", $itostr(err, NULL)->ptr); - logf("Error: %s", $itostr(err, "$T $M $K")->ptr); + logf("Error: %s", $itostr(err, NULL)); + logf("Error: %s", $itostr(err, "$T $M $K")); ioRead(a, b, strlen($errmsg(err))); $(ioRead, a, b, strlen($errmsg(err))); $(ioRead, a, b, $(ioRead, a, b, $(ioStatus, a))); logf("Error: %s", $errmsg($(fobjGetError, a))); - errno = ENOENT; - err = $syserr(); + err = $syserr(ENOENT); logf("Error: %s", $errmsg(err)); - logf("Error: %s", $irepr(err)->ptr); + logf("Error: %s", $irepr(err)); errno = ENOENT; - err = $syserr("Opening file"); + err = $syserr(errno, "Opening file"); logf("Error: %s", $errmsg(err)); - logf("Error: %s", $irepr(err)->ptr); - errno = ENOENT; - err = $syserr("Opening file {path}", (path, "folder/read.me")); + logf("Error: %s", $irepr(err)); + err = $syserr(ENOENT, "Opening file {path}", path("folder/read.me")); logf("Error: %s", $errmsg(err)); - logf("Error: %s", $irepr(err)->ptr); + logf("Error: %s", $irepr(err)); logf("Errno: %d", getErrno(err)); Klass0 *k0 = $alloc(Klass0); aird = bind_ioRead(k0); - ioRead__cb k0_ioRead = fetch_cb_ioRead(k0, fobj_self_klass, true); + ioRead__cb_t k0_ioRead = ioRead__fetch_cb(k0, fobj_self_klass, true); for (i = 0; i < benchcnt; i++) { switch (benchmode) { case 0: ioRead(k0, b, 100); break; @@ -247,7 +246,7 @@ int main(int argc, char** argv) { $(ioStatus, a); { - ioRead_i bird = {NULL}; + ioRead_i bird = $null(ioRead); $iset(&bird, aird); $iswap(&bird, aird); $iref(bird); @@ -269,7 +268,6 @@ int main(int 
argc, char** argv) { fobjStr *stre = fobj_stradd(strc, strd); - ft_assert(stre->len == strc->len + strd->len); ft_assert(fobj_streq_c(stre, "this is string a??????this is b!!")); stre = fobj_sprintf("%s:%d", "hello", 1); @@ -285,7 +283,7 @@ int main(int argc, char** argv) { strf = $fmt("Some {usual:8s} things cost > $${money:-8.4f}$$", (usual, $S("scary")), (money, $F(12.48))); ft_assert(fobj_streq_c(strf, "Some scary things cost > $$12.4800 $$"), - "String is '%s'", strf->ptr); + "String is '%s'", $tostr(strf)); logf("BEFORE EXIT"); } diff --git a/src/fu_util/test/qsort/sort_template.h b/src/fu_util/test/qsort/sort_template.h index fd24f5962..f204bf835 100644 --- a/src/fu_util/test/qsort/sort_template.h +++ b/src/fu_util/test/qsort/sort_template.h @@ -117,7 +117,6 @@ */ #define CppConcat(x, y) x##y #define pg_noinline __attribute__((noinline)) -typedef uint8_t uint8; #define Min(a, b) ((a) < (b) ? (a) : (b)) #define ST_MAKE_PREFIX(a) CppConcat(a,_) @@ -237,7 +236,7 @@ ST_SCOPE void ST_SORT(ST_ELEMENT_TYPE * first, size_t n #define DO_SWAPN(a_, b_, n_) ST_SWAPN((a_), (b_), (n_)) #define DO_SWAP(a_, b_) ST_SWAP((a_), (b_)) #else -#define ST_POINTER_TYPE uint8 +#define ST_POINTER_TYPE uint8_t #define ST_POINTER_STEP element_size #define DO_SWAPN(a_, b_, n_) ST_SWAPN((a_), (b_), (n_)) #define DO_SWAP(a_, b_) DO_SWAPN((a_), (b_), element_size) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 6f6dcdff6..65fe1041f 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -98,10 +98,6 @@ extern const char *PROGRAM_EMAIL; #define LOCK_STALE_TIMEOUT 30 #define LOG_FREQ 10 -/* Directory/File permission */ -#define DIR_PERMISSION (0700) -#define FILE_PERMISSION (0600) - /* 64-bit xid support for PGPRO_EE */ #ifndef PGPRO_EE #define XID_FMT "%u" diff --git a/src/utils/file.c b/src/utils/file.c index d492bf2c6..3059a5605 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -4789,11 +4789,11 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, if ($ifdef(err 
= , pioSetAsync, src.self) && $haserr(err)) elog(ERROR, "Cannot enable async mode on source \"%s\": %s", - $irepr(src)->ptr, $errmsg(err)); + $irepr(src), $errmsg(err)); if ($ifdef(err = , pioSetAsync, dest.self) && $haserr(err)) elog(ERROR, "Cannot enable async mode on destination \"%s\": %s", - $irepr(dest)->ptr, $errmsg(err)); + $irepr(dest), $errmsg(err)); for (i = nfilters - 1; i >= 0; i--) dest = pioWrapWriteFilter(dest, filters[i], OUT_BUF_SIZE); @@ -4820,7 +4820,7 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, $ireturn(err); $ireturn($err(SysErr, "Short write to destination file {path}: {writtenSz} < {wantedSz}", - path($irepr(dest)->ptr), + path($irepr(dest)), wantedSz(read_len), writtenSz(write_len))); } } @@ -4829,7 +4829,7 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, err = $i(pioFlush, dest); if ($haserr(err)) $ireturn($err(SysErr, "Cannot flush file {path}: {cause}", - path($irepr(dest)->ptr), cause(err.self))); + path($irepr(dest)), cause(err.self))); return $noerr(); } diff --git a/src/utils/file.h b/src/utils/file.h index 6d10bf33d..7fd1e7919 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -12,6 +12,10 @@ #include +/* Directory/File permission */ +#define DIR_PERMISSION (0700) +#define FILE_PERMISSION (0600) + typedef enum { /* message for compatibility check */ From f51448855170973afc335a959d4ff02b3d077bf2 Mon Sep 17 00:00:00 2001 From: Vyacheslav Makarov Date: Thu, 25 Aug 2022 16:12:11 +0300 Subject: [PATCH 037/339] [PBCKP-254] fixing build errors after the merge 1. [commit 549d98ab93a80e228a2f4551194383395f88f32e] In the PBCKP-181 task, the 'get_log_message' function that was used in the PBCKP-169 task was removed. Corrected it. 2. The order of arguments for 'fio_*' functions has been changed. [commit 5a6bd0190] Fixed conflicts that came with the task [PBCKP-153]. 
[commit 41855701c] --- src/dir.c | 4 ++-- src/utils/logger.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/dir.c b/src/dir.c index c0ffd1a04..e555cfc27 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1066,10 +1066,10 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba waldir_path, to_path); /* create tablespace directory from waldir_path*/ - fio_mkdir(waldir_path, pg_tablespace_mode, location); + fio_mkdir(location, waldir_path, pg_tablespace_mode, false); /* create link to linked_path */ - if (fio_symlink(waldir_path, to_path, incremental, location) < 0) + if (fio_symlink(location, waldir_path, to_path, incremental) < 0) elog(ERROR, "Could not create symbolic link \"%s\": %s", to_path, strerror(errno)); diff --git a/src/utils/logger.c b/src/utils/logger.c index 5b4fad0c0..e58802e28 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -506,12 +506,12 @@ elog_stderr(int elevel, const char *fmt, ...) json_add_value(buf_json, "pid", str_pid, 0, true); json_add_key(buf_json, "level", 0); write_elevel_for_json(buf_json, elevel); - message = get_log_message(fmt, args); + message = ft_vasprintf(fmt, args).ptr; json_add_value(buf_json, "msg", message, 0, true); json_add_value(buf_json, "my_thread_num", str_thread_json, 0, true); json_add_min(buf_json, JT_END_OBJECT); fputs(buf_json->data, stderr); - pfree(message); + ft_free(message); termPQExpBuffer(buf_json); } else From 249876ad2b6a1b74e175ae4585e1c99fc29f3378 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 26 Aug 2022 17:16:38 +0300 Subject: [PATCH 038/339] [PBCKP-232] remove deprecated options (master-db, master-host, master-port, master-user, replica-timeout) part 2 --- src/configure.c | 25 +++++++++++++++++++++- tests/compatibility.py | 48 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 1 deletion(-) diff --git a/src/configure.c b/src/configure.c index 47433346f..b828a30e5 100644 --- a/src/configure.c +++ b/src/configure.c @@ -34,13 +34,15 @@ static void show_configure_json(ConfigOption *opt); #define OPTION_INSTANCE_GROUP "Backup instance information" #define OPTION_CONN_GROUP "Connection parameters" -#define OPTION_REPLICA_GROUP "Replica parameters" #define OPTION_ARCHIVE_GROUP "Archive parameters" #define OPTION_LOG_GROUP "Logging parameters" #define OPTION_RETENTION_GROUP "Retention parameters" #define OPTION_COMPRESS_GROUP "Compression parameters" #define OPTION_REMOTE_GROUP "Remote access parameters" +/* dummy placeholder for obsolete options to store in following instance_options[] */ +static char *obsolete_option_placeholder = NULL; + /* * Short name should be non-printable ASCII character. 
*/ @@ -90,6 +92,27 @@ ConfigOption instance_options[] = &instance_config.conn_opt.pguser, SOURCE_CMD, 0, OPTION_CONN_GROUP, 0, option_get_value }, + /* Obsolete options */ + { + 's', 202, "master-db", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, + { + 's', 203, "master-host", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, + { + 's', 204, "master-port", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, + { + 's', 205, "master-user", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, + { + 's', 206, "replica-timeout", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, /* Archive options */ { 'u', 207, "archive-timeout", diff --git a/tests/compatibility.py b/tests/compatibility.py index e274c22be..e3aab15e0 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -1482,3 +1482,51 @@ def test_compatibility_tablespace(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_compatibility_master_options(self): + """ + Test correctness of handling of removed master-db, master-host, master-port, + master-user and replica-timeout options + """ + self.assertTrue( + self.version_to_num(self.old_probackup_version) <= self.version_to_num('2.6.0'), + 'You need pg_probackup old_binary =< 2.6.0 for this test') + + fname = self.id().split('.')[3] + node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node')) + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + # add deprecated options (using probackup< 2.6) into pg_probackup.conf + # don't care about option values, we can use random values here + self.set_config( + backup_dir, 'node', + 
options=[ + '--master-db=postgres', + '--master-host=localhost', + '--master-port=5432', + '--master-user={0}'.format(self.user), + '--replica-timeout=100500'], + old_binary=True) + + # and try to show config with new binary (those options must be silently skipped) + self.show_config(backup_dir, 'node', old_binary=False) + + # store config with new version (those options must disappear from config) + self.set_config( + backup_dir, 'node', + options=[], + old_binary=False) + + # and check absence + config_options = self.show_config(backup_dir, 'node', old_binary=False) + self.assertFalse( + ['master-db', 'master-host', 'master-port', 'master-user', 'replica-timeout'] & config_options.keys(), + 'Obsolete options found in new config') + + # Clean after yourself + self.del_test_dir(module_name, fname) + From dcf52ee5c826216b994ab4d83ff6fe4483656000 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 26 Aug 2022 19:52:27 +0300 Subject: [PATCH 039/339] [PBCKP-230] remove instr_time.h from borrowed filelist --- Makefile | 2 -- src/archive.c | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 2b663f59b..e9ed487cc 100644 --- a/Makefile +++ b/Makefile @@ -40,7 +40,6 @@ OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o s # sources borrowed from postgresql (paths are relative to pg top dir) BORROWED_H_SRC := \ - src/include/portability/instr_time.h \ src/bin/pg_basebackup/receivelog.h \ src/bin/pg_basebackup/streamutil.h \ src/bin/pg_basebackup/walmethods.h @@ -87,7 +86,6 @@ override CPPFLAGS := -DFRONTEND $(CPPFLAGS) $(PG_CPPFLAGS) PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} # additional dependencies on borrowed files -src/archive.o: $(BORROW_DIR)/instr_time.h src/backup.o src/catchup.o src/pg_probackup.o: $(BORROW_DIR)/streamutil.h src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o $(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h $(BORROW_DIR)/receivelog.h: 
$(BORROW_DIR)/walmethods.h diff --git a/src/archive.c b/src/archive.c index 0ebe5e504..b552689cd 100644 --- a/src/archive.c +++ b/src/archive.c @@ -11,7 +11,7 @@ #include #include "pg_probackup.h" #include "utils/thread.h" -#include "instr_time.h" +#include "portability/instr_time.h" static int push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, From a0983d20871eeeec62e9bc7b314f9fcf04350f62 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 26 Aug 2022 20:02:55 +0300 Subject: [PATCH 040/339] Version 2.6.0 --- src/pg_probackup.h | 2 +- tests/expected/option_version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 3f71301fe..68c0276d3 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -335,7 +335,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.7" +#define PROGRAM_VERSION "2.6.0" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20600 diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out index af186a98c..f3ebaaa25 100644 --- a/tests/expected/option_version.out +++ b/tests/expected/option_version.out @@ -1 +1 @@ -pg_probackup 2.5.7 +pg_probackup 2.6.0 From 360a88b8fc1642d4c930fa76e53c332e1e8898d4 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Sat, 27 Aug 2022 04:09:25 +0300 Subject: [PATCH 041/339] [ci skip] fix identation --- src/catalog.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index b93564f7e..b4be159d1 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1091,15 +1091,15 @@ get_backup_filelist(pgBackup *backup, bool strict) COMP_FILE_CRC32(true, content_crc, buf, strlen(buf)); - get_control_value_str(buf, "path", path, sizeof(path),true); - get_control_value_int64(buf, "size", &write_size, true); - get_control_value_int64(buf, "mode", &mode, true); - get_control_value_int64(buf, "is_datafile", &is_datafile, true); - get_control_value_int64(buf, "is_cfs", &is_cfs, false); - get_control_value_int64(buf, "crc", &crc, true); - get_control_value_str(buf, "compress_alg", compress_alg_string, sizeof(compress_alg_string), false); - get_control_value_int64(buf, "external_dir_num", &external_dir_num, false); - get_control_value_int64(buf, "dbOid", &dbOid, false); + get_control_value_str(buf, "path", path, sizeof(path),true); + get_control_value_int64(buf, "size", &write_size, true); + get_control_value_int64(buf, "mode", &mode, true); + get_control_value_int64(buf, "is_datafile", &is_datafile, true); + get_control_value_int64(buf, "is_cfs", &is_cfs, false); + get_control_value_int64(buf, "crc", &crc, true); + get_control_value_str(buf, "compress_alg", compress_alg_string, sizeof(compress_alg_string), false); + get_control_value_int64(buf, "external_dir_num", &external_dir_num, false); + get_control_value_int64(buf, "dbOid", &dbOid, false); file = pgFileInit(path); file->write_size = (int64) write_size; From e5b41aaf4f292c2cf8f70f7849785b1c572d676b Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Sat, 27 Aug 2022 04:21:39 +0300 Subject: [PATCH 042/339] [ci skip] fix identation --- src/validate.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/validate.c b/src/validate.c index d88de5583..79a450ac8 100644 --- a/src/validate.c +++ b/src/validate.c @@ -67,9 +67,9 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) /* Check backup server version */ if (strcmp(backup->server_version, PG_MAJORVERSION) != 0) - elog(ERROR, "Backup %s has server version %s, but current pg_probackup binary " + elog(ERROR, "Backup %s has server version %s, but current pg_probackup binary " "compiled with server version %s", - base36enc(backup->start_time), backup->server_version, PG_MAJORVERSION); + base36enc(backup->start_time), backup->server_version, PG_MAJORVERSION); if (backup->status == BACKUP_STATUS_RUNNING) { From 491452ae614d144a71c352de51b3efeccc1ce493 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Sat, 27 Aug 2022 13:33:42 +0300 Subject: [PATCH 043/339] [PBCKP-256] remove src/backend/utils/hash/pg_crc.c borrowing for this * use of crc macros has been split * places where _TRADITIONAL_CRC32 and _CRC32C are used are explicitly marked * added two files to ensure compatibility with pg-11 * at the end of pg-11 support, these new files and places where _TRADITIONAL_CRC32 is used can be deleted --- Makefile | 4 +- src/backup.c | 2 +- src/catalog.c | 12 ++--- src/compatibility/pg-11.c | 98 +++++++++++++++++++++++++++++++++++ src/compatibility/pg-11.h | 67 ++++++++++++++++++++++++ src/data.c | 54 +++++++++++--------- src/dir.c | 104 ++++++++++++++++++++++++++++---------- src/pg_probackup.h | 29 ++--------- src/restore.c | 8 --- src/stream.c | 8 +-- src/util.c | 2 +- src/utils/file.c | 18 +++---- src/utils/pgut.h | 3 +- src/validate.c | 35 +++++++++---- 14 files changed, 325 insertions(+), 119 deletions(-) create mode 100644 src/compatibility/pg-11.c create mode 100644 src/compatibility/pg-11.h diff --git a/Makefile 
b/Makefile index e9ed487cc..5c50c6d5e 100644 --- a/Makefile +++ b/Makefile @@ -36,7 +36,8 @@ OBJS := src/utils/configuration.o src/utils/json.o src/utils/logger.o \ OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o src/data.o \ src/delete.o src/dir.o src/fetch.o src/help.o src/init.o src/merge.o \ src/parsexlog.o src/ptrack.o src/pg_probackup.o src/restore.o src/show.o src/stream.o \ - src/util.o src/validate.o src/datapagemap.o src/catchup.o + src/util.o src/validate.o src/datapagemap.o src/catchup.o \ + src/compatibility/pg-11.o # sources borrowed from postgresql (paths are relative to pg top dir) BORROWED_H_SRC := \ @@ -45,7 +46,6 @@ BORROWED_H_SRC := \ src/bin/pg_basebackup/walmethods.h BORROWED_C_SRC := \ src/backend/access/transam/xlogreader.c \ - src/backend/utils/hash/pg_crc.c \ src/bin/pg_basebackup/receivelog.c \ src/bin/pg_basebackup/streamutil.c \ src/bin/pg_basebackup/walmethods.c diff --git a/src/backup.c b/src/backup.c index 15f1a4d1c..b98935b8e 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1725,7 +1725,7 @@ pg_stop_backup_write_file_helper(const char *path, const char *filename, const c if (S_ISREG(file->mode)) { - file->crc = pgFileGetCRC(full_filename, true, false); + file->crc = pgFileGetCRC32C(full_filename, false); file->write_size = file->size; file->uncompressed_size = file->size; diff --git a/src/catalog.c b/src/catalog.c index b93564f7e..4e132438e 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1067,7 +1067,7 @@ get_backup_filelist(pgBackup *backup, bool strict) files = parray_new(); - INIT_FILE_CRC32(true, content_crc); + INIT_CRC32C(content_crc); while (fgets(buf, lengthof(buf), fp)) { @@ -1089,7 +1089,7 @@ get_backup_filelist(pgBackup *backup, bool strict) hdr_size; pgFile *file; - COMP_FILE_CRC32(true, content_crc, buf, strlen(buf)); + COMP_CRC32C(content_crc, buf, strlen(buf)); get_control_value_str(buf, "path", path, sizeof(path),true); get_control_value_int64(buf, "size", &write_size, true); @@ 
-1141,7 +1141,7 @@ get_backup_filelist(pgBackup *backup, bool strict) parray_append(files, file); } - FIN_FILE_CRC32(true, content_crc); + FIN_CRC32C(content_crc); if (ferror(fp)) elog(ERROR, "Failed to read from file: \"%s\"", backup_filelist_path); @@ -2538,7 +2538,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, setvbuf(out, buf, _IOFBF, BUFFERSZ); if (sync) - INIT_FILE_CRC32(true, backup->content_crc); + INIT_CRC32C(backup->content_crc); /* print each file in the list */ for (i = 0; i < parray_num(files); i++) @@ -2606,13 +2606,13 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, sprintf(line+len, "}\n"); if (sync) - COMP_FILE_CRC32(true, backup->content_crc, line, strlen(line)); + COMP_CRC32C(backup->content_crc, line, strlen(line)); fprintf(out, "%s", line); } if (sync) - FIN_FILE_CRC32(true, backup->content_crc); + FIN_CRC32C(backup->content_crc); if (fflush(out) != 0) elog(ERROR, "Cannot flush file list \"%s\": %s", diff --git a/src/compatibility/pg-11.c b/src/compatibility/pg-11.c new file mode 100644 index 000000000..52f4b551c --- /dev/null +++ b/src/compatibility/pg-11.c @@ -0,0 +1,98 @@ +/*------------------------------------------------------------------------- + * + * pg-11.c + * PostgreSQL <= 11 compatibility + * + * Portions Copyright (c) 2022, Postgres Professional + * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include + +#if PG_VERSION_NUM < 120000 + +#include "c.h" +#include "utils/pg_crc.h" + +/* From postgresql src/backend/utils/hash/pg_crc.c */ + +/* + * Lookup table for calculating CRC-32 using Sarwate's algorithm. + * + * This table is based on the polynomial + * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1. + * (This is the same polynomial used in Ethernet checksums, for instance.) 
+ * Using Williams' terms, this is the "normal", not "reflected" version. + */ + +const uint32 pg_crc32_table[256] = { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, + 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, + 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, + 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, + 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, + 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, + 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, + 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, + 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, + 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, + 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, + 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, + 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, + 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, + 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, + 0xF762575D, 0x806567CB, 
0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, + 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, + 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, + 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, + 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, + 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, + 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, + 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, + 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, + 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, + 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, + 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D +}; + +#endif diff --git a/src/compatibility/pg-11.h b/src/compatibility/pg-11.h new file mode 100644 index 000000000..63a83070a --- /dev/null +++ b/src/compatibility/pg-11.h @@ -0,0 +1,67 @@ +/*------------------------------------------------------------------------- + * + * pg-11.h + * PostgreSQL <= 11 compatibility + * + * Copyright (c) 2022, Postgres Professional + * + * When PG-11 reaches the end of support, we will need to remove + * *_CRC32_COMPAT macros and use *_CRC32C instead. + * And this file will be removed. 
+ *------------------------------------------------------------------------- + */ + +#ifndef PG11_COMPAT_H +#define PG11_COMPAT_H + +#include "utils/pgut.h" + +#if PG_VERSION_NUM >= 120000 + +#define INIT_CRC32_COMPAT(backup_version, crc) \ +do { \ + Assert(backup_version >= 20025); \ + INIT_CRC32C(crc); \ +} while (0) + +#define COMP_CRC32_COMPAT(backup_version, crc, data, len) \ +do { \ + Assert(backup_version >= 20025); \ + COMP_CRC32C((crc), (data), (len)); \ +} while (0) + +#define FIN_CRC32_COMPAT(backup_version, crc) \ +do { \ + Assert(backup_version >= 20025); \ + FIN_CRC32C(crc); \ +} while (0) + +#else /* PG_VERSION_NUM < 120000 */ + +#define INIT_CRC32_COMPAT(backup_version, crc) \ +do { \ + if (backup_version <= 20021 || backup_version >= 20025) \ + INIT_CRC32C(crc); \ + else \ + INIT_TRADITIONAL_CRC32(crc); \ +} while (0) + +#define COMP_CRC32_COMPAT(backup_version, crc, data, len) \ +do { \ + if (backup_version <= 20021 || backup_version >= 20025) \ + COMP_CRC32C((crc), (data), (len)); \ + else \ + COMP_TRADITIONAL_CRC32(crc, data, len); \ +} while (0) + +#define FIN_CRC32_COMPAT(backup_version, crc) \ +do { \ + if (backup_version <= 20021 || backup_version >= 20025) \ + FIN_CRC32C(crc); \ + else \ + FIN_TRADITIONAL_CRC32(crc); \ +} while (0) + +#endif /* PG_VERSION_NUM < 120000 */ + +#endif /* PG11_COMPAT_H */ diff --git a/src/data.c b/src/data.c index 17ae4b91a..f50749497 100644 --- a/src/data.c +++ b/src/data.c @@ -24,6 +24,9 @@ #include "utils/thread.h" +/* for crc32_compat macros */ +#include "compatibility/pg-11.h" + /* Union to ease operations on relation pages */ typedef struct DataPage { @@ -32,7 +35,7 @@ typedef struct DataPage } DataPage; static bool get_page_header(FILE *in, const char *fullpath, BackupPageHeader *bph, - pg_crc32 *crc, bool use_crc32c); + pg_crc32 *crc, uint32 backup_version); #ifdef HAVE_LIBZ /* Implementation of zlib compression method */ @@ -448,7 +451,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum, 
write_buffer_size = compressed_size + sizeof(BackupPageHeader); /* Update CRC */ - COMP_FILE_CRC32(true, *crc, write_buffer, write_buffer_size); + COMP_CRC32C(*crc, write_buffer, write_buffer_size); /* write data page */ if (fio_fwrite(out, write_buffer, write_buffer_size) != write_buffer_size) @@ -529,7 +532,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat file->read_size = 0; file->write_size = 0; file->uncompressed_size = 0; - INIT_FILE_CRC32(true, file->crc); + INIT_CRC32C(file->crc); /* * Read each page, verify checksum and write it to backup. @@ -628,7 +631,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat cleanup: /* finish CRC calculation */ - FIN_FILE_CRC32(true, file->crc); + FIN_CRC32C(file->crc); /* dump page headers */ write_page_headers(headers, file, hdr_map, is_merge); @@ -805,7 +808,7 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, file->crc = fio_get_crc32(FIO_DB_HOST, from_fullpath, false); /* ...and checksum is the same... */ - if (EQ_TRADITIONAL_CRC32(file->crc, prev_file->crc)) + if (EQ_CRC32C(file->crc, prev_file->crc)) { file->write_size = BYTES_INVALID; return; /* ...skip copying file. */ @@ -1018,7 +1021,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers * or when merging something. Align read_len only when restoring * or merging old backups. 
*/ - if (get_page_header(in, from_fullpath, &(page).bph, NULL, false)) + if (get_page_header(in, from_fullpath, &(page).bph, NULL, backup_version)) { cur_pos_in += sizeof(BackupPageHeader); @@ -1389,7 +1392,7 @@ backup_non_data_file_internal(const char *from_fullpath, ssize_t read_len = 0; char *buf = NULL; - INIT_FILE_CRC32(true, file->crc); + INIT_CRC32C(file->crc); /* reset size summary */ file->read_size = 0; @@ -1485,7 +1488,7 @@ backup_non_data_file_internal(const char *from_fullpath, strerror(errno)); /* update CRC */ - COMP_FILE_CRC32(true, file->crc, buf, read_len); + COMP_CRC32C(file->crc, buf, read_len); file->read_size += read_len; } @@ -1501,7 +1504,7 @@ backup_non_data_file_internal(const char *from_fullpath, cleanup: /* finish CRC calculation and store into pgFile */ - FIN_FILE_CRC32(true, file->crc); + FIN_CRC32C(file->crc); if (in && fclose(in)) elog(ERROR, "Cannot close the file \"%s\": %s", from_fullpath, strerror(errno)); @@ -1678,7 +1681,6 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, bool is_valid = true; FILE *in; pg_crc32 crc; - bool use_crc32c = backup_version <= 20021 || backup_version >= 20025; BackupPageHeader2 *headers = NULL; int n_hdr = -1; off_t cur_pos_in = 0; @@ -1702,7 +1704,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, } /* calc CRC of backup file */ - INIT_FILE_CRC32(use_crc32c, crc); + INIT_CRC32_COMPAT(backup_version, crc); /* read and validate pages one by one */ while (true) @@ -1718,7 +1720,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, if (interrupted || thread_interrupted) elog(ERROR, "Interrupted during data file validation"); - /* newer backups have page headers in separate storage */ + /* newer backups (post 2.4.0) have page headers in separate storage */ if (headers) { n_hdr++; @@ -1747,10 +1749,10 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, cur_pos_in = headers[n_hdr].pos; } } - 
/* old backups rely on header located directly in data file */ + /* old backups (pre 2.4.0) rely on header located directly in data file */ else { - if (get_page_header(in, fullpath, &(compressed_page).bph, &crc, use_crc32c)) + if (get_page_header(in, fullpath, &(compressed_page).bph, &crc, backup_version)) { /* Backward compatibility kludge, TODO: remove in 3.0 * for some reason we padded compressed pages in old versions @@ -1790,9 +1792,9 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, cur_pos_in += read_len; if (headers) - COMP_FILE_CRC32(use_crc32c, crc, &compressed_page, read_len); + COMP_CRC32_COMPAT(backup_version, crc, &compressed_page, read_len); else - COMP_FILE_CRC32(use_crc32c, crc, compressed_page.data, read_len); + COMP_CRC32_COMPAT(backup_version, crc, compressed_page.data, read_len); if (compressed_size != BLCKSZ || page_may_be_compressed(compressed_page.data, file->compress_alg, @@ -1861,7 +1863,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, } } - FIN_FILE_CRC32(use_crc32c, crc); + FIN_CRC32_COMPAT(backup_version, crc); fclose(in); if (crc != file->crc) @@ -2017,7 +2019,7 @@ get_lsn_map(const char *fullpath, uint32 checksum_version, /* Every page in data file contains BackupPageHeader, extract it */ bool get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph, - pg_crc32 *crc, bool use_crc32c) + pg_crc32 *crc, uint32 backup_version) { /* read BackupPageHeader */ size_t read_len = fread(bph, 1, sizeof(BackupPageHeader), in); @@ -2044,7 +2046,7 @@ get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph, * the problem of backward compatibility for backups of old versions */ if (crc) - COMP_FILE_CRC32(use_crc32c, *crc, bph, read_len); + COMP_CRC32_COMPAT(backup_version, *crc, bph, read_len); if (bph->block == 0 && bph->compressed_size == 0) elog(ERROR, "Empty block in file \"%s\"", fullpath); @@ -2363,6 +2365,8 @@ copy_pages(const char *to_fullpath, const 
char *from_fullpath, * array of headers. * TODO: some access optimizations would be great here: * less fseeks, buffering, descriptor sharing, etc. + * + * Used for post 2.4.0 backups */ BackupPageHeader2* get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, bool strict) @@ -2437,9 +2441,9 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b } /* validate checksum */ - INIT_FILE_CRC32(true, hdr_crc); - COMP_FILE_CRC32(true, hdr_crc, headers, read_len); - FIN_FILE_CRC32(true, hdr_crc); + INIT_CRC32C(hdr_crc); + COMP_CRC32C(hdr_crc, headers, read_len); + FIN_CRC32C(hdr_crc); if (hdr_crc != file->hdr_crc) { @@ -2486,9 +2490,9 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, read_len = (file->n_headers + 1) * sizeof(BackupPageHeader2); /* calculate checksums */ - INIT_FILE_CRC32(true, file->hdr_crc); - COMP_FILE_CRC32(true, file->hdr_crc, headers, read_len); - FIN_FILE_CRC32(true, file->hdr_crc); + INIT_CRC32C(file->hdr_crc); + COMP_CRC32C(file->hdr_crc, headers, read_len); + FIN_CRC32C(file->hdr_crc); zheaders = pgut_malloc(read_len * 2); memset(zheaders, 0, read_len * 2); diff --git a/src/dir.c b/src/dir.c index 0bcd60169..53f92ef74 100644 --- a/src/dir.c +++ b/src/dir.c @@ -203,26 +203,23 @@ pgFileInit(const char *rel_path) * obvious about it. 
*/ pg_crc32 -pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) +pgFileGetCRC32C(const char *file_path, bool missing_ok) { FILE *fp; pg_crc32 crc = 0; char *buf; size_t len = 0; - INIT_FILE_CRC32(use_crc32c, crc); + INIT_CRC32C(crc); /* open file in binary read mode */ fp = fopen(file_path, PG_BINARY_R); if (fp == NULL) { - if (errno == ENOENT) + if (missing_ok && errno == ENOENT) { - if (missing_ok) - { - FIN_FILE_CRC32(use_crc32c, crc); - return crc; - } + FIN_CRC32C(crc); + return crc; } elog(ERROR, "Cannot open file \"%s\": %s", @@ -234,7 +231,7 @@ pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) buf = pgut_malloc(STDIO_BUFSIZE); /* calc CRC of file */ - for (;;) + do { if (interrupted) elog(ERROR, "interrupted during CRC calculation"); @@ -244,19 +241,75 @@ pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) if (ferror(fp)) elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); - /* update CRC */ - COMP_FILE_CRC32(use_crc32c, crc, buf, len); + COMP_CRC32C(crc, buf, len); + } + while (!feof(fp)); + + FIN_CRC32C(crc); + fclose(fp); + pg_free(buf); + + return crc; +} + +#if PG_VERSION_NUM < 120000 +/* + * Read the local file to compute its CRC using traditional algorithm. 
+ * (*_TRADITIONAL_CRC32 macros) + * This was used only in version 2.0.22--2.0.24 + * And never used for PG >= 12 + * To be removed with end of PG-11 support + */ +pg_crc32 +pgFileGetCRC32(const char *file_path, bool missing_ok) +{ + FILE *fp; + pg_crc32 crc = 0; + char *buf; + size_t len = 0; + + INIT_TRADITIONAL_CRC32(crc); + + /* open file in binary read mode */ + fp = fopen(file_path, PG_BINARY_R); + if (fp == NULL) + { + if (missing_ok && errno == ENOENT) + { + FIN_TRADITIONAL_CRC32(crc); + return crc; + } - if (feof(fp)) - break; + elog(ERROR, "Cannot open file \"%s\": %s", + file_path, strerror(errno)); } - FIN_FILE_CRC32(use_crc32c, crc); + /* disable stdio buffering */ + setvbuf(fp, NULL, _IONBF, BUFSIZ); + buf = pgut_malloc(STDIO_BUFSIZE); + + /* calc CRC of file */ + do + { + if (interrupted) + elog(ERROR, "interrupted during CRC calculation"); + + len = fread(buf, 1, STDIO_BUFSIZE, fp); + + if (ferror(fp)) + elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); + + COMP_TRADITIONAL_CRC32(crc, buf, len); + } + while (!feof(fp)); + + FIN_TRADITIONAL_CRC32(crc); fclose(fp); pg_free(buf); return crc; } +#endif /* PG_VERSION_NUM < 120000 */ /* * Read the local file to compute its CRC. @@ -265,7 +318,7 @@ pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) * obvious about it. 
*/ pg_crc32 -pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok) +pgFileGetCRC32Cgz(const char *file_path, bool missing_ok) { gzFile fp; pg_crc32 crc = 0; @@ -273,19 +326,16 @@ pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok) int err; char *buf; - INIT_FILE_CRC32(use_crc32c, crc); + INIT_CRC32C(crc); /* open file in binary read mode */ fp = gzopen(file_path, PG_BINARY_R); if (fp == NULL) { - if (errno == ENOENT) + if (missing_ok && errno == ENOENT) { - if (missing_ok) - { - FIN_FILE_CRC32(use_crc32c, crc); - return crc; - } + FIN_CRC32C(crc); + return crc; } elog(ERROR, "Cannot open file \"%s\": %s", @@ -311,16 +361,16 @@ pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok) { const char *err_str = NULL; - err_str = gzerror(fp, &err); - elog(ERROR, "Cannot read from compressed file %s", err_str); + err_str = gzerror(fp, &err); + elog(ERROR, "Cannot read from compressed file %s", err_str); } } /* update CRC */ - COMP_FILE_CRC32(use_crc32c, crc, buf, len); + COMP_CRC32C(crc, buf, len); } - FIN_FILE_CRC32(use_crc32c, crc); + FIN_CRC32C(crc); gzclose(fp); pg_free(buf); @@ -1758,7 +1808,7 @@ write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_ /* Add metadata to backup_content.control */ file = pgFileNew(database_map_path, DATABASE_MAP, true, 0, FIO_BACKUP_HOST); - file->crc = pgFileGetCRC(database_map_path, true, false); + file->crc = pgFileGetCRC32C(database_map_path, false); file->write_size = file->size; file->uncompressed_size = file->read_size; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index aeb55f83e..a52942d06 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -208,28 +208,6 @@ typedef enum ForkName ptrack } ForkName; -#define INIT_FILE_CRC32(use_crc32c, crc) \ -do { \ - if (use_crc32c) \ - INIT_CRC32C(crc); \ - else \ - INIT_TRADITIONAL_CRC32(crc); \ -} while (0) -#define COMP_FILE_CRC32(use_crc32c, crc, data, len) \ -do { \ - if (use_crc32c) \ - 
COMP_CRC32C((crc), (data), (len)); \ - else \ - COMP_TRADITIONAL_CRC32(crc, data, len); \ -} while (0) -#define FIN_FILE_CRC32(use_crc32c, crc) \ -do { \ - if (use_crc32c) \ - FIN_CRC32C(crc); \ - else \ - FIN_TRADITIONAL_CRC32(crc); \ -} while (0) - #define pg_off_t unsigned long long @@ -1046,8 +1024,11 @@ extern pgFile *pgFileNew(const char *path, const char *rel_path, extern pgFile *pgFileInit(const char *rel_path); extern void pgFileFree(void *file); -extern pg_crc32 pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok); -extern pg_crc32 pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok); +extern pg_crc32 pgFileGetCRC32C(const char *file_path, bool missing_ok); +#if PG_VERSION_NUM < 120000 +extern pg_crc32 pgFileGetCRC32(const char *file_path, bool missing_ok); +#endif +extern pg_crc32 pgFileGetCRC32Cgz(const char *file_path, bool missing_ok); extern int pgFileMapComparePath(const void *f1, const void *f2); extern int pgFileCompareName(const void *f1, const void *f2); diff --git a/src/restore.c b/src/restore.c index 28a79f1ed..e4479a242 100644 --- a/src/restore.c +++ b/src/restore.c @@ -2010,7 +2010,6 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, { int i; int j; -// pg_crc32 crc; parray *database_map = NULL; parray *dbOid_exclude_list = NULL; pgFile *database_map_file = NULL; @@ -2040,13 +2039,6 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, join_path_components(path, backup->root_dir, DATABASE_DIR); join_path_components(database_map_path, path, DATABASE_MAP); - /* check database_map CRC */ -// crc = pgFileGetCRC(database_map_path, true, true, NULL, FIO_LOCAL_HOST); -// -// if (crc != database_map_file->crc) -// elog(ERROR, "Invalid CRC of backup file \"%s\" : %X. 
Expected %X", -// database_map_file->path, crc, database_map_file->crc); - /* get database_map from file */ database_map = read_database_map(backup); diff --git a/src/stream.c b/src/stream.c index b10eb7308..e2e016f4d 100644 --- a/src/stream.c +++ b/src/stream.c @@ -2,7 +2,7 @@ * * stream.c: pg_probackup specific code for WAL streaming * - * Portions Copyright (c) 2015-2022, Postgres Professional + * Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -689,7 +689,7 @@ add_walsegment_to_filelist(parray *filelist, uint32 timeline, XLogRecPtr xlogpos if (existing_file) { if (do_crc) - (*existing_file)->crc = pgFileGetCRC(wal_segment_fullpath, true, false); + (*existing_file)->crc = pgFileGetCRC32C(wal_segment_fullpath, false); (*existing_file)->write_size = xlog_seg_size; (*existing_file)->uncompressed_size = xlog_seg_size; @@ -697,7 +697,7 @@ add_walsegment_to_filelist(parray *filelist, uint32 timeline, XLogRecPtr xlogpos } if (do_crc) - file->crc = pgFileGetCRC(wal_segment_fullpath, true, false); + file->crc = pgFileGetCRC32C(wal_segment_fullpath, false); /* Should we recheck it using stat? */ file->write_size = xlog_seg_size; @@ -728,7 +728,7 @@ add_history_file_to_filelist(parray *filelist, uint32 timeline, char *basedir) /* calculate crc */ if (do_crc) - file->crc = pgFileGetCRC(fullpath, true, false); + file->crc = pgFileGetCRC32C(fullpath, false); file->write_size = file->size; file->uncompressed_size = file->size; diff --git a/src/util.c b/src/util.c index b58d88f96..e16241a70 100644 --- a/src/util.c +++ b/src/util.c @@ -304,7 +304,7 @@ get_pgcontrol_checksum(const char *pgdata_path) /* First fetch file... 
*/ buffer = slurpFile(FIO_BACKUP_HOST, pgdata_path, XLOG_CONTROL_FILE, &size, false); - + elog(WARNING, "checking %s", pgdata_path); digestControlFile(&ControlFile, buffer, size); pg_free(buffer); diff --git a/src/utils/file.c b/src/utils/file.c index 86977a19a..27b5edf86 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1428,9 +1428,9 @@ fio_get_crc32(fio_location location, const char *file_path, bool decompress) else { if (decompress) - return pgFileGetCRCgz(file_path, true, true); + return pgFileGetCRC32Cgz(file_path, true); else - return pgFileGetCRC(file_path, true, true); + return pgFileGetCRC32C(file_path, true); } } @@ -2082,7 +2082,7 @@ fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, Assert(hdr.size <= sizeof(buf)); IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - COMP_FILE_CRC32(true, file->crc, buf, hdr.size); + COMP_CRC32C(file->crc, buf, hdr.size); /* lazily open backup file */ if (!out) @@ -2270,8 +2270,6 @@ fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, Assert(hdr.size <= sizeof(buf)); IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - COMP_FILE_CRC32(true, file->crc, buf, hdr.size); - if (fio_fseek(out, blknum * BLCKSZ) < 0) { elog(ERROR, "Cannot seek block %u of \"%s\": %s", @@ -2635,7 +2633,7 @@ fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, if (file) { file->read_size += hdr.size; - COMP_FILE_CRC32(true, file->crc, buf, hdr.size); + COMP_CRC32C(file->crc, buf, hdr.size); } } else @@ -3366,9 +3364,9 @@ fio_communicate(int in, int out) case FIO_GET_CRC32: /* calculate crc32 for a file */ if (hdr.arg == 1) - crc = pgFileGetCRCgz(buf, true, true); + crc = pgFileGetCRC32Cgz(buf, true); else - crc = pgFileGetCRC(buf, true, true); + crc = pgFileGetCRC32C(buf, true); IO_CHECK(fio_write_all(out, &crc, sizeof(crc)), sizeof(crc)); break; case FIO_GET_CHECKSUM_MAP: @@ -3606,9 +3604,9 @@ pioLocalDrive_pioGetCRC32(VSelf, path_t path, 
bool compressed, err_i *err) elog(VERBOSE, "Local Drive calculate crc32 for '%s', compressed=%d", path, compressed); if (compressed) - return pgFileGetCRCgz(path, true, true); + return pgFileGetCRC32Cgz(path, true); else - return pgFileGetCRC(path, true, true); + return pgFileGetCRC32C(path, true); } static bool diff --git a/src/utils/pgut.h b/src/utils/pgut.h index 638259a3c..72ac20379 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -3,7 +3,7 @@ * pgut.h * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2017-2021, Postgres Professional + * Portions Copyright (c) 2017-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -11,6 +11,7 @@ #ifndef PGUT_H #define PGUT_H +#include #include "postgres_fe.h" #include "libpq-fe.h" diff --git a/src/validate.c b/src/validate.c index d88de5583..f76def06f 100644 --- a/src/validate.c +++ b/src/validate.c @@ -3,7 +3,7 @@ * validate.c: validate backup files. * * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -341,14 +341,22 @@ pgBackupValidateFiles(void *arg) * Starting from 2.0.25 we calculate crc of pg_control differently. 
*/ if (arguments->backup_version >= 20025 && - strcmp(file->name, "pg_control") == 0 && - !file->external_dir_num) + strcmp(file->rel_path, XLOG_CONTROL_FILE) == 0 && + file->external_dir_num == 0) crc = get_pgcontrol_checksum(arguments->base_path); else - crc = pgFileGetCRC(file_fullpath, - arguments->backup_version <= 20021 || - arguments->backup_version >= 20025, - false); +#if PG_VERSION_NUM >= 120000 + { + Assert(arguments->backup_version >= 20025); + crc = pgFileGetCRC32C(file_fullpath, false); + } +#else /* PG_VERSION_NUM < 120000 */ + if (arguments->backup_version <= 20021 || arguments->backup_version >= 20025) + crc = pgFileGetCRC32C(file_fullpath, false); + else + crc = pgFileGetCRC32(file_fullpath, false); +#endif /* PG_VERSION_NUM < 120000 */ + if (crc != file->crc) { elog(WARNING, "Invalid CRC of backup file \"%s\" : %X. Expected %X", @@ -720,8 +728,6 @@ validate_tablespace_map(pgBackup *backup, bool no_validate) pgFile **tablespace_map = NULL; pg_crc32 crc; parray *files = get_backup_filelist(backup, true); - bool use_crc32c = parse_program_version(backup->program_version) <= 20021 || - parse_program_version(backup->program_version) >= 20025; parray_qsort(files, pgFileCompareRelPathWithExternal); join_path_components(map_path, backup->database_dir, PG_TABLESPACE_MAP_FILE); @@ -746,7 +752,16 @@ validate_tablespace_map(pgBackup *backup, bool no_validate) /* check tablespace map checksumms */ if (!no_validate) { - crc = pgFileGetCRC(map_path, use_crc32c, false); +#if PG_VERSION_NUM >= 120000 + Assert(parse_program_version(backup->program_version) >= 20025); + crc = pgFileGetCRC32C(map_path, false); +#else /* PG_VERSION_NUM < 120000 */ + if (parse_program_version(backup->program_version) <= 20021 + || parse_program_version(backup->program_version) >= 20025) + crc = pgFileGetCRC32C(map_path, false); + else + crc = pgFileGetCRC32(map_path, false); +#endif /* PG_VERSION_NUM < 120000 */ if ((*tablespace_map)->crc != crc) elog(ERROR, "Invalid CRC of tablespace 
map file \"%s\" : %X. Expected %X, " From 585a069a630181c5ba8bd66e2371a98573dbcbdb Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Thu, 1 Sep 2022 22:48:23 +0300 Subject: [PATCH 044/339] [PBCKP-232] Remove PG-9.5-9.6 support (#523) * removed support for older versions of postgres (9.5, 9.6) * removed obsolete options (--master-db, --master-host, --master-port, --master-user, --replica-timeout) --- .travis.yml | 3 - Makefile | 26 +- README.md | 72 +++--- get_pg_version.mk | 36 --- src/backup.c | 182 ++++---------- src/catchup.c | 21 +- src/configure.c | 51 +--- src/dir.c | 16 -- src/help.c | 16 +- src/parsexlog.c | 5 - src/pg_probackup.c | 7 +- src/pg_probackup.h | 18 +- src/stream.c | 42 +--- src/util.c | 42 +--- src/utils/file.c | 5 +- src/utils/pgut.c | 18 +- tests/archive.py | 178 +++---------- tests/auth_test.py | 30 +-- tests/backup.py | 429 ++++++++------------------------ tests/catchup.py | 41 ++- tests/checkdb.py | 61 +---- tests/compatibility.py | 48 ++++ tests/false_positive.py | 3 - tests/helpers/ptrack_helpers.py | 93 ++----- tests/incr_restore.py | 10 - tests/pgpro2068.py | 24 +- tests/pgpro560.py | 40 +-- tests/ptrack.py | 247 ++++-------------- tests/replica.py | 109 +------- tests/restore.py | 203 +++++---------- tests/retention.py | 13 - tests/validate.py | 19 +- 32 files changed, 479 insertions(+), 1629 deletions(-) delete mode 100644 get_pg_version.mk diff --git a/.travis.yml b/.travis.yml index 26b2bc4e2..9e48c9cab 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,8 +32,6 @@ env: - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE 
PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup @@ -52,7 +50,6 @@ env: jobs: allow_failures: - if: env(PG_BRANCH) = master - - if: env(PG_BRANCH) = REL9_5_STABLE # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) # Only run CI for master branch commits to limit our travis usage diff --git a/Makefile b/Makefile index 6e2aa8ded..2b663f59b 100644 --- a/Makefile +++ b/Makefile @@ -17,21 +17,17 @@ # git clone https://github.com/postgrespro/pg_probackup postgresql/contrib/pg_probackup # cd postgresql # ./configure ... && make -# make --no-print-directory -C contrib/pg_probackup +# make -C contrib/pg_probackup # # 4. out of PG source and without PGXS # git clone https://git.postgresql.org/git/postgresql.git postgresql-src # git clone https://github.com/postgrespro/pg_probackup postgresql-src/contrib/pg_probackup # mkdir postgresql-build && cd postgresql-build # ../postgresql-src/configure ... 
&& make -# make --no-print-directory -C contrib/pg_probackup +# make -C contrib/pg_probackup # top_pbk_srcdir := $(dir $(realpath $(firstword $(MAKEFILE_LIST)))) -# get postgres version -PG_MAJORVER != $(MAKE) USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) --silent --makefile=$(top_pbk_srcdir)get_pg_version.mk -#$(info Making with PG_MAJORVER=$(PG_MAJORVER)) - PROGRAM := pg_probackup # pg_probackup sources @@ -46,18 +42,14 @@ OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o s BORROWED_H_SRC := \ src/include/portability/instr_time.h \ src/bin/pg_basebackup/receivelog.h \ - src/bin/pg_basebackup/streamutil.h + src/bin/pg_basebackup/streamutil.h \ + src/bin/pg_basebackup/walmethods.h BORROWED_C_SRC := \ src/backend/access/transam/xlogreader.c \ src/backend/utils/hash/pg_crc.c \ src/bin/pg_basebackup/receivelog.c \ - src/bin/pg_basebackup/streamutil.c -ifneq ($(PG_MAJORVER), $(findstring $(PG_MAJORVER), 9.5 9.6)) -BORROWED_H_SRC += \ - src/bin/pg_basebackup/walmethods.h -BORROWED_C_SRC += \ + src/bin/pg_basebackup/streamutil.c \ src/bin/pg_basebackup/walmethods.c -endif OBJS += src/fu_util/impl/ft_impl.o src/fu_util/impl/fo_impl.o @@ -85,9 +77,6 @@ include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif -# now we can use standard MAJORVERSION variable instead of calculated PG_MAJORVER -undefine PG_MAJORVER - # PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -I$(top_pbk_srcdir)src -I$(BORROW_DIR) PG_CPPFLAGS += -I$(top_pbk_srcdir)src/fu_util -Wno-declaration-after-statement @@ -100,11 +89,8 @@ PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} # additional dependencies on borrowed files src/archive.o: $(BORROW_DIR)/instr_time.h src/backup.o src/catchup.o src/pg_probackup.o: $(BORROW_DIR)/streamutil.h -src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o: $(BORROW_DIR)/receivelog.h -ifneq ($(MAJORVERSION), $(findstring $(MAJORVERSION), 9.5 9.6)) +src/stream.o $(BORROW_DIR)/receivelog.o 
$(BORROW_DIR)/streamutil.o $(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h $(BORROW_DIR)/receivelog.h: $(BORROW_DIR)/walmethods.h -$(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h -endif # generate separate makefile to handle borrowed files borrowed.mk: $(firstword $(MAKEFILE_LIST)) diff --git a/README.md b/README.md index 5da8d199e..281116cce 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. The utility is compatible with: -* PostgreSQL 9.6, 10, 11, 12, 13, 14; +* PostgreSQL 10, 11, 12, 13, 14; As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. @@ -74,62 +74,62 @@ Installers are available in release **assets**. 
[Latests](https://github.com/pos #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{14,13,12,11,10} +sudo apt-get install pg-probackup-{14,13,12,11,10}-dbg #DEB-SRC Packages sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{14,13,12,11,10,9.6} +sudo apt-get source pg-probackup-{14,13,12,11,10} #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}{-dbg,} +sudo apt-get install pg-probackup-{14,13,12,11,10}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10} +yum install pg_probackup-{14,13,12,11,10}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10} +yum install pg_probackup-{14,13,12,11,10}-debuginfo #RPM Oracle Linux Packages rpm -ivh 
https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10} +yum install pg_probackup-{14,13,12,11,10}-debuginfo #SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{14,13,12,11,10,9.6} +yumdownloader --source pg_probackup-{14,13,12,11,10} #RPM SUSE|SLES Packages zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{14,13,12,11,10,9.6} -zypper install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +zypper --gpg-auto-import-keys install -y pg_probackup-{14,13,12,11,10} +zypper install pg_probackup-{14,13,12,11,10}-debuginfo #SRPM SUSE|SLES Packages -zypper si pg_probackup-{14,13,12,11,10,9.6} +zypper si pg_probackup-{14,13,12,11,10} #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p7 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10} +sudo apt-get install pg_probackup-{14,13,12,11,10}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10} +sudo apt-get install pg_probackup-{14,13,12,11,10}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get 
install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10} +sudo apt-get install pg_probackup-{14,13,12,11,10}-debuginfo ``` #### pg_probackup for PostgresPro Standard and Enterprise @@ -137,46 +137,46 @@ sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10}-dbg #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}{-dbg,} +sudo apt-get install pg-probackup-{std,ent}-{12,11,10}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{13,12,11,10} +yum install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-rhel.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum 
install pg_probackup-{std,ent}-{13,12,11,10} +yum install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-oraclelinux.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{13,12,11,10} +yum install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo ``` Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). 
diff --git a/get_pg_version.mk b/get_pg_version.mk deleted file mode 100644 index d5468c5bb..000000000 --- a/get_pg_version.mk +++ /dev/null @@ -1,36 +0,0 @@ -# pg_probackup build system -# -# When building pg_probackup, there is a chicken and egg problem: -# 1. We have to define the OBJS list before including the PG makefiles. -# 2. To define this list, we need to know the PG major version. -# 3. But we can find out the postgres version only after including makefiles. -# -# This minimal makefile solves this problem, its only purpose is to -# calculate the version number from which the main build will occur next. -# -# Usage: -# include this line into main makefile -# PG_MAJORVER != $(MAKE) USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) --silent --makefile=get_pg_version.mk -# -# Known issues: -# When parent make called with -C and without --no-print-directory, then -# 'make: Leaving directory ...' string will be added (by caller make process) to PG_MAJORVER -# (at least with GNU Make 4.2.1) -# -.PHONY: get_pg_version -get_pg_version: - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/pg_probackup -top_builddir = ../.. 
-include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif - -get_pg_version: - $(info $(MAJORVERSION)) - diff --git a/src/backup.c b/src/backup.c index 7a77c2961..15f1a4d1c 100644 --- a/src/backup.c +++ b/src/backup.c @@ -32,9 +32,6 @@ parray *backup_files_list = NULL; /* We need critical section for datapagemap_add() in case of using threads */ static pthread_mutex_t backup_pagemap_mutex = PTHREAD_MUTEX_INITIALIZER; -// TODO: move to PGnodeInfo -bool exclusive_backup = false; - /* Is pg_start_backup() was executed */ bool backup_in_progress = false; @@ -80,7 +77,7 @@ backup_stopbackup_callback(bool fatal, void *userdata) { elog(WARNING, "backup in progress, stop backup"); /* don't care about stop_lsn in case of error */ - pg_stop_backup_send(st->conn, st->server_version, current.from_replica, exclusive_backup, NULL); + pg_stop_backup_send(st->conn, st->server_version, current.from_replica, NULL); } } @@ -133,12 +130,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, pg_start_backup(label, smooth_checkpoint, ¤t, nodeInfo, backup_conn); /* Obtain current timeline */ -#if PG_VERSION_NUM >= 90600 current.tli = get_current_timeline(backup_conn); -#else - /* PG-9.5 */ - current.tli = get_current_timeline_from_control(FIO_DB_HOST, instance_config.pgdata, false); -#endif /* * In incremental backup mode ensure that already-validated @@ -498,10 +490,10 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, /* Notify end of backup */ pg_stop_backup(instanceState, ¤t, backup_conn, nodeInfo); - /* In case of backup from replica >= 9.6 we must fix minRecPoint, + /* In case of backup from replica we must fix minRecPoint, * First we must find pg_control in backup_files_list. 
*/ - if (current.from_replica && !exclusive_backup) + if (current.from_replica) { pgFile *pg_control = NULL; @@ -786,11 +778,6 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, } } - if (current.from_replica && exclusive_backup) - /* Check master connection options */ - if (instance_config.master_conn_opt.pghost == NULL) - elog(ERROR, "Options for connection to master must be provided to perform backup from replica"); - /* add note to backup if requested */ if (set_backup_params && set_backup_params->note) add_note(¤t, set_backup_params->note); @@ -871,22 +858,12 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) elog(ERROR, "Unknown server version %d", nodeInfo->server_version); if (nodeInfo->server_version < 100000) - sprintf(nodeInfo->server_version_str, "%d.%d", - nodeInfo->server_version / 10000, - (nodeInfo->server_version / 100) % 100); - else - sprintf(nodeInfo->server_version_str, "%d", - nodeInfo->server_version / 10000); - - if (nodeInfo->server_version < 90500) elog(ERROR, "server version is %s, must be %s or higher", - nodeInfo->server_version_str, "9.5"); + nodeInfo->server_version_str, "10"); - if (current.from_replica && nodeInfo->server_version < 90600) - elog(ERROR, - "server version is %s, must be %s or higher for backup from replica", - nodeInfo->server_version_str, "9.6"); + sprintf(nodeInfo->server_version_str, "%d", + nodeInfo->server_version / 10000); if (nodeInfo->pgpro_support) res = pgut_execute(conn, "SELECT pg_catalog.pgpro_edition()", 0, NULL); @@ -927,9 +904,6 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) if (res) PQclear(res); - - /* Do exclusive backup only for PostgreSQL 9.5 */ - exclusive_backup = nodeInfo->server_version < 90600; } /* @@ -1011,16 +985,10 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, /* 2nd argument is 'fast'*/ params[1] = smooth ? 
"false" : "true"; - if (!exclusive_backup) - res = pgut_execute(conn, - "SELECT pg_catalog.pg_start_backup($1, $2, false)", - 2, - params); - else - res = pgut_execute(conn, - "SELECT pg_catalog.pg_start_backup($1, $2)", - 2, - params); + res = pgut_execute(conn, + "SELECT pg_catalog.pg_start_backup($1, $2, false)", + 2, + params); /* * Set flag that pg_start_backup() was called. If an error will happen it @@ -1039,21 +1007,16 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, PQclear(res); if ((!backup->stream || backup->backup_mode == BACKUP_MODE_DIFF_PAGE) && - !backup->from_replica && - !(nodeInfo->server_version < 90600 && - !nodeInfo->is_superuser)) + !backup->from_replica) /* * Switch to a new WAL segment. It is necessary to get archived WAL * segment, which includes start LSN of current backup. - * Don`t do this for replica backups and for PG 9.5 if pguser is not superuser - * (because in 9.5 only superuser can switch WAL) */ pg_switch_wal(conn); } /* * Switch to a new WAL segment. It should be called only for master. - * For PG 9.5 it should be called only if pguser is superuser. */ void pg_switch_wal(PGconn *conn) @@ -1062,11 +1025,7 @@ pg_switch_wal(PGconn *conn) pg_silent_client_messages(conn); -#if PG_VERSION_NUM >= 100000 res = pgut_execute(conn, "SELECT pg_catalog.pg_switch_wal()", 0, NULL); -#else - res = pgut_execute(conn, "SELECT pg_catalog.pg_switch_xlog()", 0, NULL); -#endif PQclear(res); } @@ -1556,20 +1515,9 @@ pg_create_restore_point(PGconn *conn, time_t backup_start_time) } void -pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text) +pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, char **query_text) { static const char - stop_exlusive_backup_query[] = - /* - * Stop the non-exclusive backup. 
Besides stop_lsn it returns from - * pg_stop_backup(false) copy of the backup label and tablespace map - * so they can be written to disk by the caller. - * TODO, question: add NULLs as backup_label and tablespace_map? - */ - "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " pg_catalog.pg_stop_backup() as lsn", stop_backup_on_master_query[] = "SELECT" " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," @@ -1578,16 +1526,8 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " labelfile," " spcmapfile" " FROM pg_catalog.pg_stop_backup(false, false)", - stop_backup_on_master_before10_query[] = - "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " lsn," - " labelfile," - " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false)", /* - * In case of backup from replica >= 9.6 we do not trust minRecPoint + * In case of backup from replica we do not trust minRecPoint * and stop_backup LSN, so we use latest replayed LSN as STOP LSN. */ stop_backup_on_replica_query[] = @@ -1597,28 +1537,12 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " pg_catalog.pg_last_wal_replay_lsn()," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false, false)", - stop_backup_on_replica_before10_query[] = - "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " pg_catalog.pg_last_xlog_replay_location()," - " labelfile," - " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false)"; + " FROM pg_catalog.pg_stop_backup(false, false)"; const char * const stop_backup_query = - is_exclusive ? - stop_exlusive_backup_query : - server_version >= 100000 ? - (is_started_on_replica ? + is_started_on_replica ? stop_backup_on_replica_query : - stop_backup_on_master_query - ) : - (is_started_on_replica ? 
- stop_backup_on_replica_before10_query : - stop_backup_on_master_before10_query - ); + stop_backup_on_master_query; bool sent = false; /* Make proper timestamp format for parse_time(recovery_time) */ @@ -1651,7 +1575,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica */ void pg_stop_backup_consume(PGconn *conn, int server_version, - bool is_exclusive, uint32 timeout, const char *query_text, + uint32 timeout, const char *query_text, PGStopBackupResult *result) { PGresult *query_result; @@ -1753,28 +1677,18 @@ pg_stop_backup_consume(PGconn *conn, int server_version, /* get backup_label_content */ result->backup_label_content = NULL; // if (!PQgetisnull(query_result, 0, backup_label_colno)) - if (!is_exclusive) - { - result->backup_label_content_len = PQgetlength(query_result, 0, backup_label_colno); - if (result->backup_label_content_len > 0) - result->backup_label_content = pgut_strndup(PQgetvalue(query_result, 0, backup_label_colno), - result->backup_label_content_len); - } else { - result->backup_label_content_len = 0; - } + result->backup_label_content_len = PQgetlength(query_result, 0, backup_label_colno); + if (result->backup_label_content_len > 0) + result->backup_label_content = pgut_strndup(PQgetvalue(query_result, 0, backup_label_colno), + result->backup_label_content_len); /* get tablespace_map_content */ result->tablespace_map_content = NULL; // if (!PQgetisnull(query_result, 0, tablespace_map_colno)) - if (!is_exclusive) - { - result->tablespace_map_content_len = PQgetlength(query_result, 0, tablespace_map_colno); - if (result->tablespace_map_content_len > 0) - result->tablespace_map_content = pgut_strndup(PQgetvalue(query_result, 0, tablespace_map_colno), - result->tablespace_map_content_len); - } else { - result->tablespace_map_content_len = 0; - } + result->tablespace_map_content_len = PQgetlength(query_result, 0, tablespace_map_colno); + if (result->tablespace_map_content_len > 0) + result->tablespace_map_content = 
pgut_strndup(PQgetvalue(query_result, 0, tablespace_map_colno), + result->tablespace_map_content_len); } /* @@ -1842,21 +1756,18 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb /* Create restore point * Only if backup is from master. - * For PG 9.5 create restore point only if pguser is superuser. */ - if (!backup->from_replica && - !(nodeInfo->server_version < 90600 && - !nodeInfo->is_superuser)) //TODO: check correctness + if (!backup->from_replica) pg_create_restore_point(pg_startbackup_conn, backup->start_time); /* Execute pg_stop_backup using PostgreSQL connection */ - pg_stop_backup_send(pg_startbackup_conn, nodeInfo->server_version, backup->from_replica, exclusive_backup, &query_text); + pg_stop_backup_send(pg_startbackup_conn, nodeInfo->server_version, backup->from_replica, &query_text); /* * Wait for the result of pg_stop_backup(), but no longer than * archive_timeout seconds */ - pg_stop_backup_consume(pg_startbackup_conn, nodeInfo->server_version, exclusive_backup, timeout, query_text, &stop_backup_result); + pg_stop_backup_consume(pg_startbackup_conn, nodeInfo->server_version, timeout, query_text, &stop_backup_result); if (backup->stream) { @@ -1869,28 +1780,25 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb wait_wal_and_calculate_stop_lsn(xlog_path, stop_backup_result.lsn, backup); /* Write backup_label and tablespace_map */ - if (!exclusive_backup) + Assert(stop_backup_result.backup_label_content != NULL); + + /* Write backup_label */ + pg_stop_backup_write_file_helper(backup->database_dir, PG_BACKUP_LABEL_FILE, "backup label", + stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + backup_files_list); + free(stop_backup_result.backup_label_content); + stop_backup_result.backup_label_content = NULL; + stop_backup_result.backup_label_content_len = 0; + + /* Write tablespace_map */ + if (stop_backup_result.tablespace_map_content != NULL) { - 
Assert(stop_backup_result.backup_label_content != NULL); - - /* Write backup_label */ - pg_stop_backup_write_file_helper(backup->database_dir, PG_BACKUP_LABEL_FILE, "backup label", - stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + pg_stop_backup_write_file_helper(backup->database_dir, PG_TABLESPACE_MAP_FILE, "tablespace map", + stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, backup_files_list); - free(stop_backup_result.backup_label_content); - stop_backup_result.backup_label_content = NULL; - stop_backup_result.backup_label_content_len = 0; - - /* Write tablespace_map */ - if (stop_backup_result.tablespace_map_content != NULL) - { - pg_stop_backup_write_file_helper(backup->database_dir, PG_TABLESPACE_MAP_FILE, "tablespace map", - stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, - backup_files_list); - free(stop_backup_result.tablespace_map_content); - stop_backup_result.tablespace_map_content = NULL; - stop_backup_result.tablespace_map_content_len = 0; - } + free(stop_backup_result.tablespace_map_content); + stop_backup_result.tablespace_map_content = NULL; + stop_backup_result.tablespace_map_content_len = 0; } if (backup->stream) diff --git a/src/catchup.c b/src/catchup.c index f2981b348..0f6e36b13 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -66,13 +66,7 @@ catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, cons source_node_info->is_ptrack_enabled = pg_is_ptrack_enabled(source_conn, source_node_info->ptrack_version_num); /* Obtain current timeline */ -#if PG_VERSION_NUM >= 90600 current.tli = get_current_timeline(source_conn); -#else - /* PG-9.5 */ - instance_config.pgdata = source_pgdata; - current.tli = get_current_timeline_from_control(FIO_DB_HOST, source_pgdata, false); -#endif elog(INFO, "Catchup start, pg_probackup version: %s, " "PostgreSQL version: %s, " @@ -191,9 +185,6 @@ 
catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, elog(ERROR, "Ptrack is disabled"); } - if (current.from_replica && exclusive_backup) - elog(ERROR, "Catchup from standby is only available for PostgreSQL >= 9.6"); - /* check that we don't overwrite tablespace in source pgdata */ catchup_check_tablespaces_existance_in_tbsmapping(source_conn); @@ -1018,13 +1009,13 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pg_silent_client_messages(source_conn); /* Execute pg_stop_backup using PostgreSQL connection */ - pg_stop_backup_send(source_conn, source_node_info.server_version, current.from_replica, exclusive_backup, &stop_backup_query_text); + pg_stop_backup_send(source_conn, source_node_info.server_version, current.from_replica, &stop_backup_query_text); /* * Wait for the result of pg_stop_backup(), but no longer than * archive_timeout seconds */ - pg_stop_backup_consume(source_conn, source_node_info.server_version, exclusive_backup, timeout, stop_backup_query_text, &stop_backup_result); + pg_stop_backup_consume(source_conn, source_node_info.server_version, timeout, stop_backup_query_text, &stop_backup_result); /* Cleanup */ pg_free(stop_backup_query_text); @@ -1033,7 +1024,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (!dry_run) wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); -#if PG_VERSION_NUM >= 90600 /* Write backup_label */ Assert(stop_backup_result.backup_label_content != NULL); if (!dry_run) @@ -1061,7 +1051,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, stop_backup_result.tablespace_map_content = NULL; stop_backup_result.tablespace_map_content_len = 0; } -#endif /* wait for end of wal streaming and calculate wal size transfered */ if (!dry_run) @@ -1084,12 +1073,10 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, } /* - * In case of backup from replica >= 9.6 
we must fix minRecPoint + * In case of backup from replica we must fix minRecPoint */ - if (current.from_replica && !exclusive_backup) - { + if (current.from_replica) set_min_recovery_point(source_pg_control_file, dest_pgdata, current.stop_lsn); - } /* close ssh session in main thread */ fio_disconnect(); diff --git a/src/configure.c b/src/configure.c index 0a1947de1..0de185b44 100644 --- a/src/configure.c +++ b/src/configure.c @@ -38,13 +38,15 @@ static void show_configure_json(ConfigOption *opt); #define OPTION_INSTANCE_GROUP "Backup instance information" #define OPTION_CONN_GROUP "Connection parameters" -#define OPTION_REPLICA_GROUP "Replica parameters" #define OPTION_ARCHIVE_GROUP "Archive parameters" #define OPTION_LOG_GROUP "Logging parameters" #define OPTION_RETENTION_GROUP "Retention parameters" #define OPTION_COMPRESS_GROUP "Compression parameters" #define OPTION_REMOTE_GROUP "Remote access parameters" +/* dummy placeholder for obsolete options to store in following instance_options[] */ +static char *obsolete_option_placeholder = NULL; + /* * Short name should be non-printable ASCII character. 
*/ @@ -94,31 +96,26 @@ ConfigOption instance_options[] = &instance_config.conn_opt.pguser, SOURCE_CMD, 0, OPTION_CONN_GROUP, 0, option_get_value }, - /* Replica options */ + /* Obsolete options */ { 's', 202, "master-db", - &instance_config.master_conn_opt.pgdatabase, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value }, { 's', 203, "master-host", - &instance_config.master_conn_opt.pghost, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value }, { 's', 204, "master-port", - &instance_config.master_conn_opt.pgport, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value }, { 's', 205, "master-user", - &instance_config.master_conn_opt.pguser, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value }, { - 'u', 206, "replica-timeout", - &instance_config.replica_timeout, SOURCE_CMD, SOURCE_DEFAULT, - OPTION_REPLICA_GROUP, OPTION_UNIT_S, option_get_value + 's', 206, "replica-timeout", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value }, /* Archive options */ { @@ -376,8 +373,6 @@ init_config(InstanceConfig *config, const char *instance_name) config->xlog_seg_size = XLOG_SEG_SIZE; #endif - config->replica_timeout = REPLICA_TIMEOUT_DEFAULT; - config->archive_timeout = ARCHIVE_TIMEOUT_DEFAULT; /* Copy logger defaults */ @@ -453,32 +448,6 @@ readInstanceConfigFile(InstanceState *instanceState) &instance->conn_opt.pguser, SOURCE_CMD, 0, OPTION_CONN_GROUP, 0, option_get_value }, - /* Replica options */ - { - 's', 202, "master-db", - &instance->master_conn_opt.pgdatabase, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 203, "master-host", - 
&instance->master_conn_opt.pghost, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 204, "master-port", - &instance->master_conn_opt.pgport, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 205, "master-user", - &instance->master_conn_opt.pguser, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 'u', 206, "replica-timeout", - &instance->replica_timeout, SOURCE_CMD, SOURCE_DEFAULT, - OPTION_REPLICA_GROUP, OPTION_UNIT_S, option_get_value - }, /* Archive options */ { 'u', 207, "archive-timeout", diff --git a/src/dir.c b/src/dir.c index e555cfc27..0bcd60169 100644 --- a/src/dir.c +++ b/src/dir.c @@ -83,11 +83,7 @@ static char *pgdata_exclude_files[] = "probackup_recovery.conf", "recovery.signal", "standby.signal", - NULL -}; -static char *pgdata_exclude_files_non_exclusive[] = -{ /*skip in non-exclusive backup */ "backup_label", "tablespace_map", @@ -571,18 +567,6 @@ dir_check_file(pgFile *file, bool backup_logs) /* Check if we need to exclude file by name */ if (S_ISREG(file->mode)) { - if (!exclusive_backup) - { - for (i = 0; pgdata_exclude_files_non_exclusive[i]; i++) - if (strcmp(file->rel_path, - pgdata_exclude_files_non_exclusive[i]) == 0) - { - /* Skip */ - elog(LOG, "Excluding file: %s", file->name); - return CHECK_FALSE; - } - } - for (i = 0; pgdata_exclude_files[i]; i++) if (strcmp(file->rel_path, pgdata_exclude_files[i]) == 0) { diff --git a/src/help.c b/src/help.c index 116a0711c..d1002f063 100644 --- a/src/help.c +++ b/src/help.c @@ -2,7 +2,7 @@ * * help.c * - * Copyright (c) 2017-2021, Postgres Professional + * Copyright (c) 2017-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -428,13 +428,6 @@ help_backup(void) printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c 
cipher_spec -F configfile')\n")); - - printf(_("\n Replica options:\n")); - printf(_(" --master-user=user_name user name to connect to master (deprecated)\n")); - printf(_(" --master-db=db_name database to connect to master (deprecated)\n")); - printf(_(" --master-host=host_name database server host of master (deprecated)\n")); - printf(_(" --master-port=port database server port of master (deprecated)\n")); - printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n\n")); } static void @@ -931,13 +924,6 @@ help_set_config(void) printf(_(" --archive-host=destination address or hostname for ssh connection to archive host\n")); printf(_(" --archive-port=port port for ssh connection to archive host (default: 22)\n")); printf(_(" --archive-user=username user name for ssh connection to archive host (default: PostgreSQL user)\n")); - - printf(_("\n Replica options:\n")); - printf(_(" --master-user=user_name user name to connect to master (deprecated)\n")); - printf(_(" --master-db=db_name database to connect to master (deprecated)\n")); - printf(_(" --master-host=host_name database server host of master (deprecated)\n")); - printf(_(" --master-port=port database server port of master (deprecated)\n")); - printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n\n")); } static void diff --git a/src/parsexlog.c b/src/parsexlog.c index df9b96fb3..39fb64f0a 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -29,13 +29,8 @@ * RmgrNames is an array of resource manager names, to make error messages * a bit nicer. 
*/ -#if PG_VERSION_NUM >= 100000 #define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask) \ name, -#else -#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup) \ - name, -#endif static const char *RmgrNames[RM_MAX_ID + 1] = { #include "access/rmgrlist.h" diff --git a/src/pg_probackup.c b/src/pg_probackup.c index bbf1a785a..b7308405c 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -79,10 +79,8 @@ __thread int my_thread_num = 1; bool progress = false; bool no_sync = false; time_t start_time = 0; -#if PG_VERSION_NUM >= 100000 char *replication_slot = NULL; bool temp_slot = false; -#endif bool perm_slot = false; /* backup options */ @@ -208,9 +206,7 @@ static ConfigOption cmd_options[] = { 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT }, { 'b', 'C', "smooth-checkpoint", &smooth_checkpoint, SOURCE_CMD_STRICT }, { 's', 'S', "slot", &replication_slot, SOURCE_CMD_STRICT }, -#if PG_VERSION_NUM >= 100000 { 'b', 181, "temp-slot", &temp_slot, SOURCE_CMD_STRICT }, -#endif { 'b', 'P', "perm-slot", &perm_slot, SOURCE_CMD_STRICT }, { 'b', 182, "delete-wal", &delete_wal, SOURCE_CMD_STRICT }, { 'b', 183, "delete-expired", &delete_expired, SOURCE_CMD_STRICT }, @@ -940,14 +936,13 @@ main(int argc, char *argv[]) wal_file_name, instanceState->instance_name, instance_config.system_identifier, system_id); } -#if PG_VERSION_NUM >= 100000 if (temp_slot && perm_slot) elog(ERROR, "You cannot specify \"--perm-slot\" option with the \"--temp-slot\" option"); /* if slot name was not provided for temp slot, use default slot name */ if (!replication_slot && temp_slot) replication_slot = DEFAULT_TEMP_SLOT_NAME; -#endif + if (!replication_slot && perm_slot) replication_slot = DEFAULT_PERMANENT_SLOT_NAME; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 68c0276d3..7ce455459 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -65,13 +65,8 @@ extern const char *PROGRAM_EMAIL; #define DATABASE_DIR "database" #define BACKUPS_DIR 
"backups" #define WAL_SUBDIR "wal" -#if PG_VERSION_NUM >= 100000 #define PG_XLOG_DIR "pg_wal" #define PG_LOG_DIR "log" -#else -#define PG_XLOG_DIR "pg_xlog" -#define PG_LOG_DIR "pg_log" -#endif #define PG_TBLSPC_DIR "pg_tblspc" #define PG_GLOBAL_DIR "global" #define BACKUP_CONTROL_FILE "backup.control" @@ -93,7 +88,6 @@ extern const char *PROGRAM_EMAIL; /* Timeout defaults */ #define ARCHIVE_TIMEOUT_DEFAULT 300 -#define REPLICA_TIMEOUT_DEFAULT 300 #define LOCK_TIMEOUT 60 #define LOCK_STALE_TIMEOUT 30 #define LOG_FREQ 10 @@ -379,9 +373,6 @@ typedef struct InstanceConfig char *external_dir_str; ConnectionOptions conn_opt; - ConnectionOptions master_conn_opt; - - uint32 replica_timeout; //Deprecated. Not used anywhere /* Wait timeout for WAL segment archiving */ uint32 archive_timeout; @@ -775,11 +766,8 @@ extern bool stream_wal; extern bool show_color; extern bool progress; extern bool is_archive_cmd; /* true for archive-{get,push} */ -/* In pre-10 'replication_slot' is defined in receivelog.h */ extern char *replication_slot; -#if PG_VERSION_NUM >= 100000 extern bool temp_slot; -#endif extern bool perm_slot; /* backup options */ @@ -788,8 +776,6 @@ extern bool smooth_checkpoint; /* remote probackup options */ extern char* remote_agent; -extern bool exclusive_backup; - /* delete options */ extern bool delete_wal; extern bool delete_expired; @@ -1282,9 +1268,9 @@ extern void pg_start_backup(const char *label, bool smooth, pgBackup *backup, PGNodeInfo *nodeInfo, PGconn *conn); extern void pg_silent_client_messages(PGconn *conn); extern void pg_create_restore_point(PGconn *conn, time_t backup_start_time); -extern void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text); +extern void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, char **query_text); extern void pg_stop_backup_consume(PGconn *conn, int server_version, - bool is_exclusive, uint32 timeout, const char 
*query_text, + uint32 timeout, const char *query_text, PGStopBackupResult *result); extern void pg_stop_backup_write_file_helper(const char *path, const char *filename, const char *error_msg_filename, const void *data, size_t len, parray *file_list); diff --git a/src/stream.c b/src/stream.c index 1ee8dee37..b10eb7308 100644 --- a/src/stream.c +++ b/src/stream.c @@ -2,7 +2,7 @@ * * stream.c: pg_probackup specific code for WAL streaming * - * Portions Copyright (c) 2015-2020, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -174,10 +174,10 @@ checkpoint_timeout(PGconn *backup_conn) * CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, * bool is_temporary, bool is_physical, bool reserve_wal, * bool slot_exists_ok) - * PG 9.5-10 + * PG 10 * CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, * bool is_physical, bool slot_exists_ok) - * NOTE: PG 9.6 and 10 support reserve_wal in + * NOTE: PG 10 support reserve_wal in * pg_catalog.pg_create_physical_replication_slot(slot_name name [, immediately_reserve boolean]) * and * CREATE_REPLICATION_SLOT slot_name { PHYSICAL [ RESERVE_WAL ] | LOGICAL output_plugin } @@ -194,7 +194,7 @@ CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *pl #elif PG_VERSION_NUM >= 110000 return CreateReplicationSlot(conn, slot_name, plugin, is_temporary, is_physical, /* reserve_wal = */ true, slot_exists_ok); -#elif PG_VERSION_NUM >= 100000 +#else /* * PG-10 doesn't support creating temp_slot by calling CreateReplicationSlot(), but * it will be created by setting StreamCtl.temp_slot later in StreamLog() @@ -203,10 +203,6 @@ CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *pl return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok); else return true; -#else - /* these 
parameters not supported in PG < 10 */ - Assert(!is_temporary); - return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok); #endif } @@ -229,13 +225,8 @@ StreamLog(void *arg) stream_stop_begin = 0; /* Create repslot */ -#if PG_VERSION_NUM >= 100000 if (temp_slot || perm_slot) if (!CreateReplicationSlot_compat(stream_arg->conn, replication_slot, NULL, temp_slot, true, false)) -#else - if (perm_slot) - if (!CreateReplicationSlot_compat(stream_arg->conn, replication_slot, NULL, false, true, false)) -#endif { interrupted = true; elog(ERROR, "Couldn't create physical replication slot %s", replication_slot); @@ -248,18 +239,13 @@ StreamLog(void *arg) elog(LOG, "started streaming WAL at %X/%X (timeline %u) using%s slot %s", (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos, stream_arg->starttli, -#if PG_VERSION_NUM >= 100000 temp_slot ? " temporary" : "", -#else - "", -#endif replication_slot); else elog(LOG, "started streaming WAL at %X/%X (timeline %u)", (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos, stream_arg->starttli); -#if PG_VERSION_NUM >= 90600 { StreamCtl ctl; @@ -274,7 +260,6 @@ StreamLog(void *arg) ctl.synchronous = false; ctl.mark_done = false; -#if PG_VERSION_NUM >= 100000 ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, // (instance_config.compress_alg == NONE_COMPRESS) ? 
0 : instance_config.compress_level, @@ -284,13 +269,10 @@ StreamLog(void *arg) ctl.stop_socket = PGINVALID_SOCKET; ctl.do_sync = false; /* We sync all files at the end of backup */ // ctl.mark_done /* for future use in s3 */ -#if PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000 +#if PG_VERSION_NUM < 110000 /* StreamCtl.temp_slot used only for PG-10, in PG>10, temp_slots are created by calling CreateReplicationSlot() */ ctl.temp_slot = temp_slot; -#endif /* PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000 */ -#else /* PG_VERSION_NUM < 100000 */ - ctl.basedir = (char *) stream_arg->basedir; -#endif /* PG_VERSION_NUM >= 100000 */ +#endif /* PG_VERSION_NUM < 110000 */ if (ReceiveXlogStream(stream_arg->conn, &ctl) == false) { @@ -298,25 +280,13 @@ StreamLog(void *arg) elog(ERROR, "Problem in receivexlog"); } -#if PG_VERSION_NUM >= 100000 if (!ctl.walmethod->finish()) { interrupted = true; elog(ERROR, "Could not finish writing WAL files: %s", strerror(errno)); } -#endif /* PG_VERSION_NUM >= 100000 */ - } -#else /* PG_VERSION_NUM < 90600 */ - /* PG-9.5 */ - if (ReceiveXlogStream(stream_arg->conn, stream_arg->startpos, stream_arg->starttli, - NULL, (char *) stream_arg->basedir, stop_streaming, - standby_message_timeout, NULL, false, false) == false) - { - interrupted = true; - elog(ERROR, "Problem in receivexlog"); } -#endif /* PG_VERSION_NUM >= 90600 */ /* be paranoid and sort xlog_files_list, * so if stop_lsn segno is already in the list, diff --git a/src/util.c b/src/util.c index 846848619..b58d88f96 100644 --- a/src/util.c +++ b/src/util.c @@ -102,11 +102,7 @@ checkControlFile(ControlFileData *ControlFile) static void digestControlFile(ControlFileData *ControlFile, char *src, size_t size) { -#if PG_VERSION_NUM >= 100000 int ControlFileSize = PG_CONTROL_FILE_SIZE; -#else - int ControlFileSize = PG_CONTROL_SIZE; -#endif if (size != ControlFileSize) elog(ERROR, "unexpected control file size %d, expected %d", @@ -127,11 +123,7 @@ writeControlFile(fio_location 
location, const char *path, ControlFileData *Contr int fd; char *buffer = NULL; -#if PG_VERSION_NUM >= 100000 int ControlFileSize = PG_CONTROL_FILE_SIZE; -#else - int ControlFileSize = PG_CONTROL_SIZE; -#endif /* copy controlFileSize */ buffer = pg_malloc0(ControlFileSize); @@ -207,44 +199,25 @@ get_current_timeline_from_control(fio_location location, const char *pgdata_path } /* - * Get last check point record ptr from pg_tonrol. + * Get last check point record ptr from pg_control. */ XLogRecPtr get_checkpoint_location(PGconn *conn) { -#if PG_VERSION_NUM >= 90600 PGresult *res; uint32 lsn_hi; uint32 lsn_lo; XLogRecPtr lsn; -#if PG_VERSION_NUM >= 100000 res = pgut_execute(conn, "SELECT checkpoint_lsn FROM pg_catalog.pg_control_checkpoint()", 0, NULL); -#else - res = pgut_execute(conn, - "SELECT checkpoint_location FROM pg_catalog.pg_control_checkpoint()", - 0, NULL); -#endif XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo); PQclear(res); /* Calculate LSN */ lsn = ((uint64) lsn_hi) << 32 | lsn_lo; return lsn; -#else - /* PG-9.5 */ - char *buffer; - size_t size; - ControlFileData ControlFile; - - buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, &size, false); - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); - - return ControlFile.checkPoint; -#endif } uint64 @@ -267,7 +240,6 @@ get_system_identifier(fio_location location, const char *pgdata_path, bool safe) uint64 get_remote_system_identifier(PGconn *conn) { -#if PG_VERSION_NUM >= 90600 PGresult *res; uint64 system_id_conn; char *val; @@ -284,18 +256,6 @@ get_remote_system_identifier(PGconn *conn) PQclear(res); return system_id_conn; -#else - /* PG-9.5 */ - char *buffer; - size_t size; - ControlFileData ControlFile; - - buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, &size, false); - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); - - return ControlFile.system_identifier; -#endif } uint32 diff --git a/src/utils/file.c 
b/src/utils/file.c index 3059a5605..86977a19a 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -38,7 +38,6 @@ typedef struct bool follow_symlink; bool add_root; bool backup_logs; - bool exclusive_backup; bool skip_hidden; int external_dir_num; } fio_list_dir_request; @@ -2798,7 +2797,6 @@ fio_list_dir_internal(parray *files, const char *root, bool exclude, req.follow_symlink = follow_symlink; req.add_root = add_root; req.backup_logs = backup_logs; - req.exclusive_backup = exclusive_backup; req.skip_hidden = skip_hidden; req.external_dir_num = external_dir_num; @@ -2891,7 +2889,6 @@ fio_list_dir_impl(int out, char* buf) * TODO: correctly send elog messages from agent to main process. */ instance_config.logger.log_level_console = ERROR; - exclusive_backup = req->exclusive_backup; dir_list_file(file_files, req->path, req->exclude, req->follow_symlink, req->add_root, req->backup_logs, req->skip_hidden, @@ -4853,4 +4850,4 @@ init_pio_objects(void) localDrive = bindref_pioDrive($alloc(pioLocalDrive)); remoteDrive = bindref_pioDrive($alloc(pioRemoteDrive)); -} \ No newline at end of file +} diff --git a/src/utils/pgut.c b/src/utils/pgut.c index c220b807d..f1b8da0b2 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -3,7 +3,7 @@ * pgut.c * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2017-2021, Postgres Professional + * Portions Copyright (c) 2017-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -20,11 +20,7 @@ #include "common/string.h" #endif -#if PG_VERSION_NUM >= 100000 #include "common/connect.h" -#else -#include "fe_utils/connect.h" -#endif #include @@ -94,7 +90,7 @@ prompt_for_password(const char *username) snprintf(message, lengthof(message), "Password for user %s: ", username); password = simple_prompt(message , false); } -#elif PG_VERSION_NUM >= 100000 +#else password = (char *) pgut_malloc(sizeof(char) * 100 + 1); if (username == 
NULL) simple_prompt("Password: ", password, 100, false); @@ -104,17 +100,7 @@ prompt_for_password(const char *username) snprintf(message, lengthof(message), "Password for user %s: ", username); simple_prompt(message, password, 100, false); } -#else - if (username == NULL) - password = simple_prompt("Password: ", 100, false); - else - { - char message[256]; - snprintf(message, lengthof(message), "Password for user %s: ", username); - password = simple_prompt(message, 100, false); - } #endif - in_password = false; } diff --git a/tests/archive.py b/tests/archive.py index 52fb225e8..be5e33fbc 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -84,11 +84,6 @@ def test_pgpro434_2(self): 'checkpoint_timeout': '30s'} ) - if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because pg_control_checkpoint() is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -264,15 +259,9 @@ def test_pgpro434_3(self): with open(log_file, 'r') as f: log_content = f.read() - # in PG =< 9.6 pg_stop_backup always wait - if self.get_version(node) < 100000: - self.assertIn( - "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", - log_content) - else: - self.assertIn( - "ERROR: WAL segment 000000010000000000000003 could not be archived in 60 seconds", - log_content) + self.assertIn( + "ERROR: WAL segment 000000010000000000000003 could not be archived in 60 seconds", + log_content) log_file = os.path.join(node.logs_dir, 'postgresql.log') with open(log_file, 'r') as f: @@ -418,12 +407,8 @@ def test_archive_push_file_exists(self): self.assertNotIn( 'pg_probackup archive-push completed successfully', log_content) - if self.get_version(node) < 100000: - wal_src = os.path.join( - node.data_dir, 'pg_xlog', '000000010000000000000001') - else: - wal_src = os.path.join( - node.data_dir, 'pg_wal', 
'000000010000000000000001') + wal_src = os.path.join( + node.data_dir, 'pg_wal', '000000010000000000000001') if self.archive_compress: with open(wal_src, 'rb') as f_in, gzip.open( @@ -555,16 +540,10 @@ def test_archive_push_partial_file_exists(self): "postgres", "INSERT INTO t1 VALUES (1) RETURNING (xmin)").decode('utf-8').rstrip() - if self.get_version(node) < 100000: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() - else: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() filename_orig = filename_orig.decode('utf-8') @@ -634,16 +613,10 @@ def test_archive_push_part_file_exists_not_stale(self): "postgres", "create table t2()") - if self.get_version(node) < 100000: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() - else: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() filename_orig = filename_orig.decode('utf-8') @@ -708,11 +681,6 @@ def test_replica_archive(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) # ADD INSTANCE 'MASTER' self.add_instance(backup_dir, 'master', master) @@ -757,9 +725,6 @@ def test_replica_archive(self): backup_dir, 'replica', replica, options=[ '--archive-timeout=30', - 
'--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), '--stream']) self.validate_pb(backup_dir, 'replica') @@ -796,9 +761,6 @@ def test_replica_archive(self): replica, backup_type='page', options=[ '--archive-timeout=60', - '--master-db=postgres', - '--master-host=localhost', - '--master-port={0}'.format(master.port), '--stream']) self.validate_pb(backup_dir, 'replica') @@ -839,11 +801,6 @@ def test_master_and_replica_parallel_archiving(self): 'archive_timeout': '10s'} ) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) replica.cleanup() @@ -894,9 +851,6 @@ def test_master_and_replica_parallel_archiving(self): backup_dir, 'replica', replica, options=[ '--archive-timeout=30', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), '--stream']) self.validate_pb(backup_dir, 'replica') @@ -921,9 +875,6 @@ def test_basic_master_and_replica_concurrent_archiving(self): set replica with archiving, make sure that archiving on both node is working. 
""" - if self.pg_config_version < self.version_to_num('9.6.0'): - return unittest.skip('You need PostgreSQL >= 9.6 for this test') - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -934,11 +885,6 @@ def test_basic_master_and_replica_concurrent_archiving(self): 'checkpoint_timeout': '30s', 'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) replica.cleanup() @@ -1115,10 +1061,7 @@ def test_archive_pg_receivexlog(self): self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if self.get_version(node) < 100000: - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') pg_receivexlog = self.run_binary( [ @@ -1188,11 +1131,8 @@ def test_archive_pg_receivexlog_compression_pg10(self): self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') pg_receivexlog = self.run_binary( [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', @@ -1269,11 +1209,6 @@ def test_archive_catalog(self): 'archive_timeout': '30s', 'checkpoint_timeout': '30s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 
'master', master) self.set_archiving(backup_dir, 'master', master) @@ -1930,10 +1865,6 @@ def test_waldir_outside_pgdata_archiving(self): """ check that archive-push works correct with symlinked waldir """ - if self.pg_config_version < self.version_to_num('10.0'): - return unittest.skip( - 'Skipped because waldir outside pgdata is supported since PG 10') - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') external_wal_dir = os.path.join(self.tmp_path, module_name, fname, 'ext_wal_dir') @@ -2041,10 +1972,7 @@ def test_archiving_and_slots(self): self.set_archiving(backup_dir, 'node', node, log_level='verbose') node.slow_start() - if self.get_version(node) < 100000: - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') # "pg_receivewal --create-slot --slot archive_slot --if-not-exists " # "&& pg_receivewal --synchronous -Z 1 /tmp/wal --slot archive_slot --no-loop" @@ -2167,22 +2095,13 @@ def test_archive_pg_receivexlog_partial_handling(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if self.get_version(node) < 100000: - app_name = 'pg_receivexlog' - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - app_name = 'pg_receivewal' - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + app_name = 'pg_receivewal' + pg_receivexlog_path = self.get_bin_path('pg_receivewal') cmdline = [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', @@ -2376,11 +2295,6 @@ def test_archive_get_batching_sanity(self): set_replication=True, initdb_params=['--data-checksums']) - if 
self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2600,16 +2514,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t1()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') @@ -2624,16 +2532,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t2()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') @@ -2648,16 +2550,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t3()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM 
pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') @@ -2672,16 +2568,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t4()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') diff --git a/tests/auth_test.py b/tests/auth_test.py index 78af21be9..16c73308f 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -62,14 +62,9 @@ def test_backup_via_unprivileged_user(self): "GRANT EXECUTE ON FUNCTION" " pg_start_backup(text, boolean, boolean) TO backup;") - if self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup") - else: - node.safe_psql( - 'postgres', - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup") + node.safe_psql( + 'postgres', + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup") try: self.backup_node( @@ -103,19 +98,10 @@ def test_backup_via_unprivileged_user(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - if self.get_version(node) < self.version_to_num('10.0'): - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup") - else: - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION " - "pg_stop_backup(boolean, boolean) TO backup") - # Do this for ptrack backups - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup") + node.safe_psql( + "postgres", + "GRANT EXECUTE ON FUNCTION " + "pg_stop_backup(boolean, boolean) TO 
backup") self.backup_node( backup_dir, 'node', node, options=['-U', 'backup']) @@ -184,8 +170,6 @@ def setUpClass(cls): "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " diff --git a/tests/backup.py b/tests/backup.py index 7d02f5b39..3dfc5c5e1 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1427,9 +1427,6 @@ def test_basic_temp_slot_for_stream_backup(self): initdb_params=['--data-checksums'], pg_options={'max_wal_size': '40MB'}) - if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1856,120 +1853,43 @@ def test_backup_with_least_privileges_role(self): "CREATE SCHEMA ptrack; " "CREATE EXTENSION ptrack WITH SCHEMA ptrack") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA 
information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - 
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) - # >= 10 - else: - 
node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE 
ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: node.safe_psql( @@ -2241,62 +2161,24 @@ def test_backup_with_less_privileges_role(self): 'backupdb', 'CREATE EXTENSION ptrack') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO 
backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "BEGIN; " + "CREATE ROLE backup WITH LOGIN; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON 
FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " + "COMMIT;" + ) # enable STREAM backup node.safe_psql( @@ -2336,10 +2218,6 @@ def test_backup_with_less_privileges_role(self): backup_dir, 'node', node, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) - if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return - # Restore as replica replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) @@ -3026,74 +2904,28 @@ def test_missing_replication_permission(self): 'postgres', 'CREATE DATABASE backupdb') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT 
EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON 
DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT 
EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if ProbackupTest.enterprise: node.safe_psql( @@ -3159,76 +2991,28 @@ def test_missing_replication_permission_1(self): 'postgres', 'CREATE DATABASE backupdb') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and 
self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - 
"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if ProbackupTest.enterprise: node.safe_psql( @@ -3383,18 +3167,9 @@ def test_pg_stop_backup_missing_permissions(self): self.simple_bootstrap(node, 'backup') - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() FROM backup') - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup') - else: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') # Full backup in streaming mode try: diff --git a/tests/catchup.py b/tests/catchup.py index a83755c54..ac243da72 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1231,27 +1231,26 @@ def test_catchup_with_replication_slot(self): ).decode('utf-8').rstrip() self.assertEqual(slot_name, 'pg_probackup_perm_slot', 'Slot name mismatch') - # 5. 
--perm-slot --temp-slot (PG>=10) - if self.get_version(src_pg) >= self.version_to_num('10.0'): - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_5')) - try: - self.catchup_node( - backup_mode = 'FULL', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = [ - '-d', 'postgres', '-p', str(src_pg.port), '--stream', - '--perm-slot', - '--temp-slot' - ] - ) - self.assertEqual(1, 0, "Expecting Error because conflicting options --perm-slot and --temp-slot used together\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + # 5. --perm-slot --temp-slot + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_5')) + try: + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--perm-slot', + '--temp-slot' + ] + ) + self.assertEqual(1, 0, "Expecting Error because conflicting options --perm-slot and --temp-slot used together\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) #self.assertEqual(1, 0, 'Stop test') self.del_test_dir(module_name, self.fname) diff --git a/tests/checkdb.py b/tests/checkdb.py index 2df946cf6..c5465efca 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -640,67 +640,8 @@ def test_checkdb_with_least_privileges(self): "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC;") - # PG 9.5 - if 
self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' # amcheck-next function - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO 
backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' -# 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' - ) # PG 10 - elif self.get_version(node) > 100000 and self.get_version(node) < 110000: + if self.get_version(node) < 110000: node.safe_psql( 'backupdb', 'CREATE ROLE backup WITH LOGIN; ' diff --git a/tests/compatibility.py b/tests/compatibility.py index e274c22be..e3aab15e0 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -1482,3 +1482,51 @@ def test_compatibility_tablespace(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_compatibility_master_options(self): + """ + Test correctness of handling of 
removed master-db, master-host, master-port, + master-user and replica-timeout options + """ + self.assertTrue( + self.version_to_num(self.old_probackup_version) <= self.version_to_num('2.6.0'), + 'You need pg_probackup old_binary =< 2.6.0 for this test') + + fname = self.id().split('.')[3] + node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node')) + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + # add deprecated options (using probackup< 2.6) into pg_probackup.conf + # don't care about option values, we can use random values here + self.set_config( + backup_dir, 'node', + options=[ + '--master-db=postgres', + '--master-host=localhost', + '--master-port=5432', + '--master-user={0}'.format(self.user), + '--replica-timeout=100500'], + old_binary=True) + + # and try to show config with new binary (those options must be silently skipped) + self.show_config(backup_dir, 'node', old_binary=False) + + # store config with new version (those options must disappear from config) + self.set_config( + backup_dir, 'node', + options=[], + old_binary=False) + + # and check absence + config_options = self.show_config(backup_dir, 'node', old_binary=False) + self.assertFalse( + ['master-db', 'master-host', 'master-port', 'master-user', 'replica-timeout'] & config_options.keys(), + 'Obsolete options found in new config') + + # Clean after yourself + self.del_test_dir(module_name, fname) + diff --git a/tests/false_positive.py b/tests/false_positive.py index a101f8107..9cff54185 100644 --- a/tests/false_positive.py +++ b/tests/false_positive.py @@ -113,9 +113,6 @@ def test_pg_10_waldir(self): """ test group access for PG >= 11 """ - if self.pg_config_version < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') - fname = self.id().split('.')[3] wal_dir = os.path.join( 
os.path.join(self.tmp_path, module_name, fname), 'wal_dir') diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 418ef4e17..e7390d6b1 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -426,52 +426,21 @@ def simple_bootstrap(self, node, role) -> None: 'postgres', 'CREATE ROLE {0} WITH LOGIN REPLICATION'.format(role)) - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0};'.format(role)) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) - # >= 10 - else: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) + # PG >= 10 + node.safe_psql( + 'postgres', + 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) 
TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False): res = node.execute( @@ -587,13 +556,7 @@ def get_md5_per_page_for_fork(self, file, size_in_pages): def get_ptrack_bits_per_page_for_fork(self, node, file, size=[]): - if self.get_pgpro_edition(node) == 'enterprise': - if self.get_version(node) < self.version_to_num('10.0'): - header_size = 48 - else: - header_size = 24 - else: - header_size = 24 + header_size = 24 ptrack_bits_for_fork = [] # TODO: use macro instead of hard coded 8KB @@ -1560,25 +1523,15 @@ def version_to_num(self, version): def switch_wal_segment(self, node): """ - Execute pg_switch_wal/xlog() in given node + Execute pg_switch_wal() in given node Args: node: an instance of PostgresNode or NodeConnection class """ if isinstance(node, testgres.PostgresNode): - if self.version_to_num( - node.safe_psql('postgres', 'show server_version').decode('utf-8') - ) >= self.version_to_num('10.0'): - node.safe_psql('postgres', 'select pg_switch_wal()') - else: - node.safe_psql('postgres', 'select pg_switch_xlog()') + node.safe_psql('postgres', 'select pg_switch_wal()') else: - if self.version_to_num( - node.execute('show server_version')[0][0] - ) >= self.version_to_num('10.0'): - node.execute('select pg_switch_wal()') - else: - node.execute('select pg_switch_xlog()') + node.execute('select pg_switch_wal()') sleep(1) @@ -1588,12 +1541,8 @@ def wait_until_replica_catch_with_master(self, master, replica): 'postgres', 'show server_version').decode('utf-8').rstrip() - if self.version_to_num(version) >= self.version_to_num('10.0'): - master_function = 'pg_catalog.pg_current_wal_lsn()' - replica_function = 'pg_catalog.pg_last_wal_replay_lsn()' - else: - master_function = 'pg_catalog.pg_current_xlog_location()' - replica_function = 'pg_catalog.pg_last_xlog_replay_location()' + master_function = 'pg_catalog.pg_current_wal_lsn()' + 
replica_function = 'pg_catalog.pg_last_wal_replay_lsn()' lsn = master.safe_psql( 'postgres', diff --git a/tests/incr_restore.py b/tests/incr_restore.py index cb684a23a..b3a2ce4a6 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -1492,11 +1492,6 @@ def test_make_replica_via_incr_checksum_restore(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master, replica=True) @@ -1565,11 +1560,6 @@ def test_make_replica_via_incr_lsn_restore(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master, replica=True) diff --git a/tests/pgpro2068.py b/tests/pgpro2068.py index 3baa0ba0b..454cac532 100644 --- a/tests/pgpro2068.py +++ b/tests/pgpro2068.py @@ -136,29 +136,7 @@ def test_minrecpoint_on_replica(self): recovery_config, "recovery_target_action = 'pause'") replica.slow_start(replica=True) - if self.get_version(node) < 100000: - script = ''' -DO -$$ -relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") -current_xlog_lsn = plpy.execute("SELECT min_recovery_end_location as lsn FROM pg_control_recovery()")[0]['lsn'] -plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) -found_corruption = False -for relation in relations: - pages_from_future = plpy.execute("with number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 
8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn)) - - if pages_from_future.nrows() == 0: - continue - - for page in pages_from_future: - plpy.notice('Found page from future. OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn'])) - found_corruption = True -if found_corruption: - plpy.error('Found Corruption') -$$ LANGUAGE plpython3u; -''' - else: - script = ''' + script = ''' DO $$ relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") diff --git a/tests/pgpro560.py b/tests/pgpro560.py index 7e10fef6a..2278b9ace 100644 --- a/tests/pgpro560.py +++ b/tests/pgpro560.py @@ -87,20 +87,12 @@ def test_pgpro560_systemid_mismatch(self): "Expecting Error because of SYSTEM ID mismatch.\n " "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) except ProbackupException as e: - if self.get_version(node1) > 90600: - self.assertTrue( - 'ERROR: Backup data directory was ' - 'initialized for system id' in e.message and - 'but connected instance system id is' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - else: - self.assertIn( - 'ERROR: System identifier mismatch. 
' - 'Connected PostgreSQL instance has system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.assertTrue( + 'ERROR: Backup data directory was ' + 'initialized for system id' in e.message and + 'but connected instance system id is' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) sleep(1) @@ -114,20 +106,12 @@ def test_pgpro560_systemid_mismatch(self): "Expecting Error because of of SYSTEM ID mismatch.\n " "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) except ProbackupException as e: - if self.get_version(node1) > 90600: - self.assertTrue( - 'ERROR: Backup data directory was initialized ' - 'for system id' in e.message and - 'but connected instance system id is' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - else: - self.assertIn( - 'ERROR: System identifier mismatch. ' - 'Connected PostgreSQL instance has system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.assertTrue( + 'ERROR: Backup data directory was initialized ' + 'for system id' in e.message and + 'but connected instance system id is' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) # Clean after yourself self.del_test_dir(module_name, fname) diff --git a/tests/ptrack.py b/tests/ptrack.py index d46ece119..b5cc384bb 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -513,116 +513,41 @@ def test_ptrack_unprivileged(self): "postgres", "CREATE DATABASE backupdb") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM 
PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON 
ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; 
" - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO 
backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) node.safe_psql( "backupdb", @@ -1635,13 +1560,7 @@ def test_create_db_on_replica(self): self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(node.port), - '--stream' - ] + options=['-j10', '--stream'] ) # CREATE DATABASE DB1 @@ -1659,13 +1578,7 @@ def test_create_db_on_replica(self): backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(node.port) - ] + options=['-j10', '--stream'] ) if self.paranoia: @@ -2379,11 +2292,7 @@ def test_ptrack_clean_replica(self): backup_dir, 'replica', replica, - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) master.safe_psql('postgres', 'checkpoint') for i in idx_ptrack: @@ -2410,11 +2319,7 @@ def test_ptrack_clean_replica(self): 'replica', replica, backup_type='ptrack', - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) master.safe_psql('postgres', 'checkpoint') for i in idx_ptrack: @@ -2442,11 +2347,7 @@ def 
test_ptrack_clean_replica(self): 'replica', replica, backup_type='page', - options=[ - '-j10', '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + options=['-j10', '--stream']) master.safe_psql('postgres', 'checkpoint') for i in idx_ptrack: @@ -2512,8 +2413,7 @@ def test_ptrack_cluster_on_btree(self): idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) + self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream']) node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') node.safe_psql('postgres', 'cluster t_heap using t_btree') @@ -2648,11 +2548,7 @@ def test_ptrack_cluster_on_btree_replica(self): master.safe_psql('postgres', 'vacuum t_heap') master.safe_psql('postgres', 'checkpoint') - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--stream', '--master-host=localhost', - '--master-db=postgres', '--master-port={0}'.format( - master.port)]) + self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream']) for i in idx_ptrack: # get size of heap and indexes. size calculated in pages @@ -2749,9 +2645,7 @@ def test_ptrack_cluster_on_gist_replica(self): self.backup_node( backup_dir, 'replica', replica, options=[ - '-j10', '--stream', '--master-host=localhost', - '--master-db=postgres', '--master-port={0}'.format( - master.port)]) + '-j10', '--stream']) for i in idx_ptrack: # get size of heap and indexes. 
size calculated in pages @@ -2919,11 +2813,7 @@ def test_ptrack_empty_replica(self): backup_dir, 'replica', replica, - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) # Create indexes for i in idx_ptrack: @@ -2943,11 +2833,7 @@ def test_ptrack_empty_replica(self): 'replica', replica, backup_type='ptrack', - options=[ - '-j1', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j1', '--stream']) if self.paranoia: pgdata = self.pgdata_content(replica.data_dir) @@ -3116,12 +3002,7 @@ def test_basic_ptrack_truncate_replica(self): # Make backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3145,12 +3026,7 @@ def test_basic_ptrack_truncate_replica(self): self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) pgdata = self.pgdata_content(replica.data_dir) @@ -3320,12 +3196,7 @@ def test_ptrack_vacuum_replica(self): replica.safe_psql('postgres', 'checkpoint') # Make FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3505,12 +3376,7 @@ def test_ptrack_vacuum_bits_frozen_replica(self): # Take backup to clean every ptrack self.backup_node( backup_dir, 'replica', 
replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3763,12 +3629,7 @@ def test_ptrack_vacuum_full_replica(self): # Take FULL backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3935,13 +3796,7 @@ def test_ptrack_vacuum_truncate_replica(self): # Take FULL backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port) - ] + options=['-j10', '--stream'] ) if master.major_version < 11: diff --git a/tests/replica.py b/tests/replica.py index acf655aac..4fe009062 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -28,11 +28,6 @@ def test_replica_switchover(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node1', node1) @@ -105,10 +100,6 @@ def test_replica_stream_ptrack_backup(self): if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - if self.pg_config_version > self.version_to_num('9.6.0'): - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -161,11 +152,7 @@ def test_replica_stream_ptrack_backup(self): backup_id = 
self.backup_node( backup_dir, 'replica', replica, - options=[ - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--stream']) self.validate_pb(backup_dir, 'replica') self.assertEqual( 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) @@ -197,11 +184,7 @@ def test_replica_stream_ptrack_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--stream']) self.validate_pb(backup_dir, 'replica') self.assertEqual( 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) @@ -239,11 +222,6 @@ def test_replica_archive_page_backup(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -293,11 +271,7 @@ def test_replica_archive_page_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, - options=[ - '--archive-timeout=60', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--archive-timeout=60']) self.validate_pb(backup_dir, 'replica') self.assertEqual( @@ -329,11 +303,7 @@ def test_replica_archive_page_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='page', - options=[ - '--archive-timeout=60', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--archive-timeout=60']) pgbench.wait() @@ -381,11 +351,6 @@ def test_basic_make_replica_via_restore(self): pg_options={ 'archive_timeout': '10s'}) - if 
self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -439,11 +404,6 @@ def test_take_backup_from_delayed_replica(self): initdb_params=['--data-checksums'], pg_options={'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -552,11 +512,6 @@ def test_replica_promote(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -643,11 +598,6 @@ def test_replica_stop_lsn_null_offset(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master) @@ -728,11 +678,6 @@ def test_replica_stop_lsn_null_offset_next_record(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) 
self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -830,11 +775,6 @@ def test_archive_replica_null_offset(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master) @@ -914,11 +854,6 @@ def test_archive_replica_not_null_offset(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master) @@ -1003,11 +938,6 @@ def test_replica_toast(self): 'wal_level': 'replica', 'shared_buffers': '128MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -1105,11 +1035,6 @@ def test_start_stop_lsn_in_the_same_segno(self): 'wal_level': 'replica', 'shared_buffers': '128MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -1183,11 +1108,6 @@ def test_replica_promote_1(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - 
self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) # set replica True, so archive_mode 'always' is used. @@ -1310,11 +1230,6 @@ def test_replica_promote_archive_delta(self): 'checkpoint_timeout': '30s', 'archive_timeout': '30s'}) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) self.set_config( @@ -1435,11 +1350,6 @@ def test_replica_promote_archive_page(self): 'checkpoint_timeout': '30s', 'archive_timeout': '30s'}) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) self.set_archiving(backup_dir, 'node', node1) @@ -1557,11 +1467,6 @@ def test_parent_choosing(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) @@ -1708,11 +1613,7 @@ def test_replica_via_basebackup(self): # restore stream backup self.restore_node(backup_dir, 'node', node) - xlog_dir = 'pg_wal' - if self.get_version(node) < 100000: - xlog_dir = 'pg_xlog' - - filepath = os.path.join(node.data_dir, xlog_dir, "00000002.history") + filepath = os.path.join(node.data_dir, 'pg_wal', "00000002.history") self.assertTrue( os.path.exists(filepath), "History file do not exists: {0}".format(filepath)) diff --git a/tests/restore.py b/tests/restore.py index 
37f133573..d6246b3e2 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -361,10 +361,6 @@ def test_restore_to_lsn_inclusive(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(module_name, fname) - return - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -432,10 +428,6 @@ def test_restore_to_lsn_not_inclusive(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(module_name, fname) - return - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2146,10 +2138,7 @@ def test_restore_target_new_options(self): with node.connect("postgres") as con: con.execute("INSERT INTO tbl0005 VALUES (1)") con.commit() - if self.get_version(node) > self.version_to_num('10.0'): - res = con.execute("SELECT pg_current_wal_lsn()") - else: - res = con.execute("SELECT pg_current_xlog_location()") + res = con.execute("SELECT pg_current_wal_lsn()") con.commit() con.execute("INSERT INTO tbl0005 VALUES (2)") @@ -2240,33 +2229,32 @@ def test_restore_target_new_options(self): node.slow_start() # Restore with recovery target lsn - if self.get_version(node) >= 100000: - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target-lsn={0}'.format(target_lsn), - "--recovery-target-action=promote", - '--recovery-target-timeline=1', - ]) + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target-lsn={0}'.format(target_lsn), + "--recovery-target-action=promote", + '--recovery-target-timeline=1', + ]) - with open(recovery_conf, 'r') as f: - recovery_conf_content = f.read() + with open(recovery_conf, 'r') 
as f: + recovery_conf_content = f.read() - self.assertIn( - "recovery_target_lsn = '{0}'".format(target_lsn), - recovery_conf_content) + self.assertIn( + "recovery_target_lsn = '{0}'".format(target_lsn), + recovery_conf_content) - self.assertIn( - "recovery_target_action = 'promote'", - recovery_conf_content) + self.assertIn( + "recovery_target_action = 'promote'", + recovery_conf_content) - self.assertIn( - "recovery_target_timeline = '1'", - recovery_conf_content) + self.assertIn( + "recovery_target_timeline = '1'", + recovery_conf_content) - node.slow_start() + node.slow_start() # Clean after yourself self.del_test_dir(module_name, fname) @@ -3197,119 +3185,42 @@ def test_missing_database_map(self): "postgres", "CREATE DATABASE backupdb") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, 
checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE 
pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN 
SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL 
FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: # TODO why backup works without these grants ? diff --git a/tests/retention.py b/tests/retention.py index b0399a239..7bfff6b28 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -1575,11 +1575,6 @@ def test_window_error_backups_2(self): self.show_pb(backup_dir, 'node')[1]['id'] - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'SELECT pg_catalog.pg_stop_backup()') - # Take DELTA backup self.backup_node( backup_dir, 'node', node, backup_type='delta', @@ -1599,10 +1594,6 @@ def test_retention_redundancy_overlapping_chains(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return unittest.skip('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1649,10 +1640,6 @@ def test_retention_redundancy_overlapping_chains_1(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return unittest.skip('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) diff --git a/tests/validate.py b/tests/validate.py index 22a03c3be..7cdc0e92e 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -1757,14 +1757,9 @@ def test_validate_corrupt_wal_between_backups(self): con.commit() target_xid = res[0][0] - if self.get_version(node) < self.version_to_num('10.0'): - walfile = node.safe_psql( - 'postgres', - 'select pg_xlogfile_name(pg_current_xlog_location())').decode('utf-8').rstrip() - else: - walfile = node.safe_psql( - 'postgres', - 'select 
pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() + walfile = node.safe_psql( + 'postgres', + 'select pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() if self.archive_compress: walfile = walfile + '.gz' @@ -3506,12 +3501,8 @@ def test_corrupt_pg_control_via_resetxlog(self): backup_id = self.backup_node(backup_dir, 'node', node) - if self.get_version(node) < 100000: - pg_resetxlog_path = self.get_bin_path('pg_resetxlog') - wal_dir = 'pg_xlog' - else: - pg_resetxlog_path = self.get_bin_path('pg_resetwal') - wal_dir = 'pg_wal' + pg_resetxlog_path = self.get_bin_path('pg_resetwal') + wal_dir = 'pg_wal' os.mkdir( os.path.join( From 4b08603d5a42a9d40b7e6b805f7c85b92593850d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 12 Sep 2022 12:24:25 +0300 Subject: [PATCH 045/339] [PBCKP-245] fu_utils: compilable under MinGW --- Makefile | 4 +- src/fu_util/CMakeLists.txt | 24 ++++++++- src/fu_util/fm_util.h | 86 ++++++++++++++---------------- src/fu_util/fo_obj.h | 1 - src/fu_util/ft_array.inc.h | 11 +++- src/fu_util/ft_search.inc.h | 7 ++- src/fu_util/ft_util.h | 25 +++++---- src/fu_util/impl/fo_impl.c | 30 +++++------ src/fu_util/impl/fo_impl2.h | 5 +- src/fu_util/impl/ft_impl.c | 82 +++++++++++++++++++++------- src/fu_util/test/CMakeLists.txt | 7 +++ src/fu_util/test/obj1.c | 1 + src/fu_util/test/qsort/qsort.inc.c | 1 - 13 files changed, 179 insertions(+), 105 deletions(-) diff --git a/Makefile b/Makefile index 2b663f59b..21553f97c 100644 --- a/Makefile +++ b/Makefile @@ -51,14 +51,14 @@ BORROWED_C_SRC := \ src/bin/pg_basebackup/streamutil.c \ src/bin/pg_basebackup/walmethods.c -OBJS += src/fu_util/impl/ft_impl.o src/fu_util/impl/fo_impl.o - BORROW_DIR := src/borrowed BORROWED_H := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_H_SRC))) BORROWED_C := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_C_SRC))) OBJS += $(patsubst %.c, %.o, $(BORROWED_C)) EXTRA_CLEAN := $(BORROWED_H) $(BORROWED_C) $(BORROW_DIR) borrowed.mk +OBJS += 
src/fu_util/impl/ft_impl.o src/fu_util/impl/fo_impl.o + # off-source build support ifneq ($(abspath $(CURDIR))/, $(top_pbk_srcdir)) VPATH := $(top_pbk_srcdir) diff --git a/src/fu_util/CMakeLists.txt b/src/fu_util/CMakeLists.txt index 6752d5dd2..a5426df42 100644 --- a/src/fu_util/CMakeLists.txt +++ b/src/fu_util/CMakeLists.txt @@ -5,6 +5,7 @@ set(CMAKE_C_STANDARD 99) set(CMAKE_C_EXTENSIONS true) include(CheckCSourceCompiles) +include(CheckFunctionExists) add_library(fu_utils impl/ft_impl.c impl/fo_impl.c) @@ -12,12 +13,21 @@ set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) target_link_libraries(fu_utils PRIVATE Threads::Threads) +if(CMAKE_USE_PTHREADS_INIT) + target_compile_definitions(fu_utils PRIVATE USE_PTHREADS) +else() + message(FATAL_ERROR "Need pthread support to build") +endif() + +CHECK_FUNCTION_EXISTS(strerror_r HAVE_STRERROR_R) + # Detect for installed beautiful https://github.com/ianlancetaylor/libbacktrace include_directories(.) if(NOT CMAKE_C_COMPILER MATCHES tcc) find_library(LIBBACKTRACE backtrace) if(LIBBACKTRACE) set(CMAKE_REQUIRED_LIBRARIES backtrace) + target_link_libraries(fu_utils PRIVATE backtrace) check_c_source_compiles(" #include int main(void) { @@ -30,11 +40,21 @@ if(NOT CMAKE_C_COMPILER MATCHES tcc) endif() endif() endif() +check_include_file(execinfo.h HAVE_EXECINFO_H) + +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fexceptions") + +if(HAVE_EXECINFO_H) + target_compile_definitions(fu_utils PRIVATE HAVE_EXECINFO_H) +endif() +if(HAVE_STRERROR_R) + target_compile_definitions(fu_utils PRIVATE HAVE_STRERROR_R) +endif() + configure_file(fu_utils_cfg.h.in fu_utils_cfg.h) target_include_directories(fu_utils INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}") target_include_directories(fu_utils PRIVATE "${PROJECT_BINARY_DIR}") -target_link_libraries(fu_utils PUBLIC backtrace) install(TARGETS fu_utils DESTINATION lib) install(FILES fm_util.h ft_util.h fo_obj.h @@ -43,4 +63,4 @@ install(FILES fm_util.h ft_util.h fo_obj.h DESTINATION 
include/fu_utils) install(FILES impl/ft_impl.h impl/fo_impl.h DESTINATION include/fu_utils/impl) -add_subdirectory(test) \ No newline at end of file +add_subdirectory(test) diff --git a/src/fu_util/fm_util.h b/src/fu_util/fm_util.h index 11d96682d..18a971aa7 100644 --- a/src/fu_util/fm_util.h +++ b/src/fu_util/fm_util.h @@ -24,6 +24,9 @@ /****************************************/ // LOGIC +#define fm_true 1 +#define fm_false 0 + #define fm_compl(v) fm_cat(fm_compl_, v) #define fm_compl_0 1 #define fm_compl_1 0 @@ -81,38 +84,56 @@ #define fm_tail(...) fm__tail(__VA_ARGS__) #define fm__tail(x, ...) __VA_ARGS__ -#define fm_or_default(...) \ - fm_iif(fm_va_01(__VA_ARGS__))(__VA_ARGS__) #define fm_va_single(...) fm__va_single(__VA_ARGS__, fm__comma) #define fm_va_many(...) fm__va_many(__VA_ARGS__, fm__comma) #define fm__va_single(x, y, ...) fm__va_result(y, 1, 0) #define fm__va_many(x, y, ...) fm__va_result(y, 0, 1) -#define fm__va_result(x, y, res, ...) res +#define fm__va_result(...) fm__va_result_fin(__VA_ARGS__) +#define fm__va_result_fin(x, y, res, ...) res #define fm_no_va fm_is_empty #define fm_va_01 fm_isnt_empty -#define fm_va_01n(...) fm_cat3(fm__va_01n_, fm__isnt_empty(__VA_ARGS__), fm_va_many(__VA_ARGS__)) -#define fm__va_01n_00 0 -#define fm__va_01n_10 1 -#define fm__va_01n_11 n -#if !__STRICT_ANSI__ +#ifndef FM_USE_STRICT + #if defined(__STRICT_ANSI__) || defined(_MSC_VER) /* well, clang-cl doesn't allow to distinguish std mode */ + #define FM_USE_STRICT + #endif +#endif + +#ifndef FM_USE_STRICT #define fm_is_empty(...) fm__is_empty(__VA_ARGS__) #define fm__is_empty(...) fm_va_single(~, ##__VA_ARGS__) #define fm_isnt_empty(...) fm__isnt_empty(__VA_ARGS__) #define fm__isnt_empty(...) fm_va_many(~, ##__VA_ARGS__) + +#define fm_va_01n(...) fm_cat3(fm__va_01n_, fm__isnt_empty(__VA_ARGS__), fm_va_many(__VA_ARGS__)) +#define fm__va_01n_00 0 +#define fm__va_01n_10 1 +#define fm__va_01n_11 n + +#define fm_when_isnt_empty(...) 
fm_cat(fm__when_, fm__isnt_empty(__VA_ARGS__)) #else #define fm_is_empty(...) fm_and(fm__is_emptyfirst(__VA_ARGS__), fm_va_single(__VA_ARGS__)) #define fm_isnt_empty(...) fm_nand(fm__is_emptyfirst(__VA_ARGS__), fm_va_single(__VA_ARGS__)) #define fm__is_emptyfirst(x, ...) fm_iif(fm_is_tuple(x))(0)(fm__is_emptyfirst_impl(x)) -#define fm__is_emptyfirst_impl(x,...) fm_tuple_2((\ - fm__is_emptyfirst_do1 x (fm__is_emptyfirst_do2), 1, 0)) +#define fm__is_emptyfirst_impl(x,...) fm__va_result(\ + fm__is_emptyfirst_do1 x (fm__is_emptyfirst_do2), 1, 0) #define fm__is_emptyfirst_do1(F) F() #define fm__is_emptyfirst_do2(...) , + +#define fm_when_isnt_empty(...) fm_cat(fm__when_, fm_isnt_empty(__VA_ARGS__)) + +#define fm_va_01n(...) fm_cat3(fm__va_01n_, fm__is_emptyfirst(__VA_ARGS__), fm_va_many(__VA_ARGS__)) +#define fm__va_01n_10 0 +#define fm__va_01n_00 1 +#define fm__va_01n_01 n +#define fm__va_01n_11 n #endif -#define fm_when_isnt_empty(...) fm_cat(fm__when_, fm__isnt_empty(__VA_ARGS__)) +#define fm_or_default(...) \ + fm_iif(fm_va_01(__VA_ARGS__))(__VA_ARGS__) + #define fm_va_comma(...) \ fm_when_isnt_empty(__VA_ARGS__)(fm__comma) #define fm_va_comma_fun(...) \ @@ -127,23 +148,6 @@ #define fm__is_tuple_help(...) , #define fm__is_tuple_(...) fm__is_tuple_choose(__VA_ARGS__) -#define fm_tuple_expand(x) fm_expand x -#define fm_tuple_tag(x) fm_head x -#define fm_tuple_data(x) fm_tail x -#define fm_tuple_0(x) fm_head x -#define fm_tuple_1(x) fm__tuple_1 x -#define fm__tuple_1(_0, _1, ...) _1 -#define fm_tuple_2(x) fm__tuple_2 x -#define fm__tuple_2(_0, _1, _2, ...) _2 - -#define fm_tuple_tag_or_0(x) fm__tuple_tag_or_0_(fm__tuple_tag_or_0_help x, 0) -#define fm__tuple_tag_or_0_(...) fm__tuple_tag_or_0_choose(__VA_ARGS__) -#define fm__tuple_tag_or_0_choose(a,x,...) x -#define fm__tuple_tag_or_0_help(tag, ...) 
, tag - -#define fm_dispatch_tag_or_0(prefix, x) \ - fm_cat(prefix, fm_tuple_tag_or_0(x)) - /****************************************/ // Iteration @@ -160,20 +164,18 @@ // recursion handle : delay macro expansion to next recursion iteration #define fm_recurs(id) id fm_empty fm_empty() () -#define fm_recurs2(a,b) fm_cat fm_empty fm_empty() () (a,b) +#define fm_recurs2(a,b) fm_cat fm_empty() (a,b) #define fm_defer(id) id fm_empty() #define fm_foreach_join(join, macro, ...) \ - fm_foreach_join_(fm_empty, join, macro, __VA_ARGS__) -#define fm_foreach_join_(join1, join2, macro, ...) \ - fm_cat(fm_foreach_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, __VA_ARGS__) + fm_cat(fm_foreach_join_, fm_va_01n(__VA_ARGS__))(fm_empty, join, macro, __VA_ARGS__) #define fm_foreach_join_0(join1, join2, macro, ...) #define fm_foreach_join_1(join1, join2, macro, x) \ join1() macro(x) #define fm_foreach_join_n(join1, join2, macro, x, y, ...) \ join1() macro(x) \ join2() macro(y) \ - fm_recurs2(fm_, foreach_join_) (join2, join2, macro, __VA_ARGS__) + fm_recurs2(fm_foreach_join_, fm_va_01n(__VA_ARGS__))(join2, join2, macro, __VA_ARGS__) #define fm_foreach(macro, ...) \ fm_foreach_join(fm_empty, macro, __VA_ARGS__) @@ -181,16 +183,14 @@ fm_foreach_join(fm_comma, macro, __VA_ARGS__) #define fm_foreach_arg_join(join, macro, arg, ...) \ - fm_foreach_arg_join_(fm_empty, join, macro, arg, __VA_ARGS__) -#define fm_foreach_arg_join_(join1, join2, macro, arg, ...) \ - fm_cat(fm_foreach_arg_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, arg, __VA_ARGS__) + fm_cat(fm_foreach_arg_join_, fm_va_01n(__VA_ARGS__))(fm_empty, join, macro, arg, __VA_ARGS__) #define fm_foreach_arg_join_0(join1, join2, macro, ...) #define fm_foreach_arg_join_1(join1, join2, macro, arg, x) \ join1() macro(arg, x) #define fm_foreach_arg_join_n(join1, join2, macro, arg, x, y, ...) 
\ join1() macro(arg, x) \ join2() macro(arg, y) \ - fm_recurs2(fm_, foreach_arg_join_) (join2, join2, macro, arg, __VA_ARGS__) + fm_recurs2(fm_foreach_arg_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, arg, __VA_ARGS__) #define fm_foreach_arg(macro, arg, ...) \ fm_foreach_arg_join(fm_empty, macro, arg, __VA_ARGS__) @@ -198,16 +198,14 @@ fm_foreach_arg_join(fm_comma, macro, arg, __VA_ARGS__) #define fm_foreach_tuple_join(join, macro, ...) \ - fm_foreach_tuple_join_(fm_empty, join, macro, __VA_ARGS__) -#define fm_foreach_tuple_join_(join1, join2, macro, ...) \ - fm_cat(fm_foreach_tuple_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, __VA_ARGS__) + fm_cat(fm_foreach_tuple_join_, fm_va_01n(__VA_ARGS__))(fm_empty, join, macro, __VA_ARGS__) #define fm_foreach_tuple_join_0(join1, join2, macro, ...) #define fm_foreach_tuple_join_1(join1, join2, macro, x) \ join1() macro x #define fm_foreach_tuple_join_n(join1, join2, macro, x, y, ...) \ join1() macro x \ join2() macro y \ - fm_recurs2(fm_, foreach_tuple_join_) (join2, join2, macro, __VA_ARGS__) + fm_recurs2(fm_foreach_tuple_join_, fm_va_01n(__VA_ARGS__))(join2, join2, macro, __VA_ARGS__) #define fm_foreach_tuple(macro, ...) \ fm_foreach_tuple_join(fm_empty, macro, __VA_ARGS__) @@ -215,16 +213,14 @@ fm_foreach_tuple_join(fm_comma, macro, __VA_ARGS__) #define fm_foreach_tuple_arg_join(join, macro, arg, ...) \ - fm_foreach_tuple_arg_join_(fm_empty, join, macro, arg, __VA_ARGS__) -#define fm_foreach_tuple_arg_join_(join1, join2, macro, arg, ...) \ - fm_cat(fm_foreach_tuple_arg_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, arg, __VA_ARGS__) + fm_cat(fm_foreach_tuple_arg_join_, fm_va_01n(__VA_ARGS__))(fm_empty, join, macro, arg, __VA_ARGS__) #define fm_foreach_tuple_arg_join_0(join1, join2, macro, ...) #define fm_foreach_tuple_arg_join_1(join1, join2, macro, arg, x) \ join1() fm_apply(macro, arg, fm_expand x) #define fm_foreach_tuple_arg_join_n(join1, join2, macro, arg, x, y, ...) 
\ join1() fm_apply(macro, arg, fm_expand x) \ join2() fm_apply(macro, arg, fm_expand y) \ - fm_recurs2(fm_, foreach_tuple_arg_join_) (join2, join2, macro, arg, __VA_ARGS__) + fm_recurs2(fm_foreach_tuple_arg_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, arg, __VA_ARGS__) #define fm_foreach_tuple_arg(macro, arg, ...) \ fm_foreach_tuple_arg_join(fm_empty, macro, arg, __VA_ARGS__) diff --git a/src/fu_util/fo_obj.h b/src/fu_util/fo_obj.h index 70d4ee6b9..6ad423dc6 100644 --- a/src/fu_util/fo_obj.h +++ b/src/fu_util/fo_obj.h @@ -7,7 +7,6 @@ typedef void* fobj_t; #include -#include /* * Pointer to "object*. diff --git a/src/fu_util/ft_array.inc.h b/src/fu_util/ft_array.inc.h index 57d7cad42..847a6393d 100644 --- a/src/fu_util/ft_array.inc.h +++ b/src/fu_util/ft_array.inc.h @@ -1,5 +1,7 @@ /* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ -#include +#ifndef FU_UTIL_H +#error "ft_util.h should be included" +#endif /* * Accepts 2 macroses: @@ -176,7 +178,12 @@ #define ft_array_walk fm_cat(ft_array_pref, _walk) #define ft_array_walk_r fm_cat(ft_array_pref, _walk_r) -#define HUGE_SIZE ((uint64_t)UINT_MAX << 16) +#if __SIZEOF_SIZE_T__ < 8 +#define HUGE_SIZE ((size_t)UINT_MAX >> 2) +#else +#define HUGE_SIZE ((size_t)UINT_MAX << 16) +#endif + #ifndef NDEBUG /* try to catch uninitialized vars */ #define ft_slice_invariants(slc) \ diff --git a/src/fu_util/ft_search.inc.h b/src/fu_util/ft_search.inc.h index b567e11bf..149874cd6 100644 --- a/src/fu_util/ft_search.inc.h +++ b/src/fu_util/ft_search.inc.h @@ -1,3 +1,8 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#ifndef FU_UTIL_H +#error "ft_util.h should be included" +#endif + /* * Sort template. 
* Accepts four macrosses: @@ -39,8 +44,6 @@ * */ -#include - #define ft_func_bsearch fm_cat(ft_bsearch_, FT_SEARCH) #define ft_func_bsearch_r fm_cat3(ft_bsearch_, FT_SEARCH, _r) #define ft_func_search fm_cat(ft_search_, FT_SEARCH) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index 56a0d05d2..084eabf9b 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -1,24 +1,29 @@ /* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ #ifndef FU_UTIL_H -#define FU_UTIL_H +#define FU_UTIL_H 1 #include #include +#include #include #include #include +#include +#include /* trick to find ssize_t even on windows and strict ansi mode */ #if defined(_MSC_VER) #include typedef SSIZE_T ssize_t; -#else -#include +#define SSIZE_MAX ((ssize_t)((SIZE_MAX) >> 1)) + +#if !defined(WIN32) && defined(_WIN32) +#define WIN32 _WIN32 +#endif + #endif #include #include #include -#include - #ifdef __GNUC__ #define ft_gcc_const __attribute__((const)) @@ -29,7 +34,7 @@ typedef SSIZE_T ssize_t; #define ft_gcc_malloc(free, idx) __attribute__((malloc)) #endif #define ft_unused __attribute__((unused)) -#define ft_gnu_printf(fmt, arg) __attribute__((format(printf,fmt,arg))) +#define ft_gnu_printf(fmt, arg) __attribute__((format(gnu_printf,fmt,arg))) #define ft_likely(x) __builtin_expect(!!(x), 1) #define ft_unlikely(x) __builtin_expect(!!(x), 0) #define ft_always_inline __attribute__((always_inline)) @@ -103,6 +108,7 @@ typedef void ft_gnu_printf(4, 0) (*ft_log_hook_t)(enum FT_LOG_LEVEL, /* * Initialize logging in main executable file. * Pass custom hook or NULL. + * In MinGW if built with libbacktrace, pass executable path (argv[0]). */ #define ft_init_log(hook) ft__init_log(hook, __FILE__) @@ -135,7 +141,7 @@ const char* ft__truncate_log_filename(const char *file); #define ft_dbg_enabled() ft__dbg_enabled() #define ft_dbg_assert(x, ...) ft__dbg_assert(x, #x, __VA_ARGS__) -#define ft_assert(x, ...) ft__assert(x, #x, __VA_ARGS__) +#define ft_assert(x, ...) 
ft__assert(x, #x, ##__VA_ARGS__) #define ft_assyscall(syscall, ...) ft__assyscall(syscall, fm_uniq(res), __VA_ARGS__) /* threadsafe strerror */ @@ -305,13 +311,14 @@ typedef struct ft_bytes_t { } ft_bytes_t; ft_inline ft_bytes_t ft_bytes(void* ptr, size_t len) { - return (ft_bytes_t){.ptr = ptr, .len = len}; + return (ft_bytes_t){.ptr = (char*)ptr, .len = len}; } ft_inline void ft_bytes_consume(ft_bytes_t *bytes, size_t cut); ft_inline void ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src); // String utils +extern size_t ft_strlcpy(char *dest, const char* src, size_t dest_size); /* * Concat strings regarding destination buffer size. * Note: if dest already full and doesn't contain \0n character, then fatal log is issued. @@ -411,7 +418,7 @@ extern bool ft_strbuf_vcatf (ft_strbuf_t *buf, const char *fmt, va_list * Use it if format string comes from user. */ ft_gnu_printf(3, 0) -extern bool ft_strbuf_vcatf_err (ft_strbuf_t *buf, bool err[static 1], +extern bool ft_strbuf_vcatf_err (ft_strbuf_t *buf, bool err[1], const char *fmt, va_list args); /* * Returns string which points into the buffer. diff --git a/src/fu_util/impl/fo_impl.c b/src/fu_util/impl/fo_impl.c index bbc49ab7f..63fa372fb 100644 --- a/src/fu_util/impl/fo_impl.c +++ b/src/fu_util/impl/fo_impl.c @@ -5,13 +5,9 @@ #include #include -#ifdef WIN32 -#define __thread __declspec(thread) -#endif -#include - #include -#include + +#include /* * We limits total number of methods, klasses and method implementations. 
@@ -650,7 +646,7 @@ fobjStr* fobj_newstr(ft_str_t s, enum FOBJ_STR_ALLOC ownership) { fobjStr *str; #if __SIZEOF_POINTER__ < 8 - ft_assert(size < (1<<30)-2); + ft_assert(s.len < (1<<30)-2); #else ft_assert(s.len < UINT32_MAX-2); #endif @@ -871,13 +867,13 @@ fobj_format_int(ft_strbuf_t *buf, uint64_t i, bool _signed, const char *fmt) { /* now add real suitable format */ switch (base) { - case 'x': strcat(tfmt + fmtlen, PRIx64); break; - case 'X': strcat(tfmt + fmtlen, PRIX64); break; - case 'o': strcat(tfmt + fmtlen, PRIo64); break; - case 'u': strcat(tfmt + fmtlen, PRIu64); break; - case 'd': strcat(tfmt + fmtlen, PRId64); break; + case 'x': ft_strlcat(tfmt, PRIx64, sizeof(tfmt)); break; + case 'X': ft_strlcat(tfmt, PRIX64, sizeof(tfmt)); break; + case 'o': ft_strlcat(tfmt, PRIo64, sizeof(tfmt)); break; + case 'u': ft_strlcat(tfmt, PRIu64, sizeof(tfmt)); break; + case 'd': ft_strlcat(tfmt, PRId64, sizeof(tfmt)); break; default: - case 'i': strcat(tfmt + fmtlen, PRIi64); break; + case 'i': ft_strlcat(tfmt, PRIi64, sizeof(tfmt)); break; } switch (base) { @@ -1082,11 +1078,11 @@ fobj__format_errmsg(const char* msg, fobj_err_kv_t *kvs) { "ident is too long in message \"%s\"", msg); ft_assert(formatdelim == NULL || closebrace - formatdelim <= 31, "format is too long in message \"%s\"", msg); - strncpy(ident, cur, identlen); + memcpy(ident, cur, identlen); ident[identlen] = 0; formatlen = formatdelim ? closebrace - (formatdelim+1) : 0; if (formatlen > 0) { - strncpy(format, formatdelim + 1, formatlen); + memcpy(format, formatdelim + 1, formatlen); } format[formatlen] = 0; kv = kvs; @@ -1293,11 +1289,11 @@ fobj_printkv(const char *fmt, ft_slc_fokv_t kvs) { "ident is too long in format \"%s\"", fmt); ft_assert(formatdelim == NULL || closebrace - formatdelim <= 31, "format is too long in format \"%s\"", fmt); - strncpy(ident, cur, identlen); + memcpy(ident, cur, identlen); ident[identlen] = 0; formatlen = formatdelim ? 
closebrace - (formatdelim+1) : 0; if (formatlen > 0) { - strncpy(format, formatdelim + 1, formatlen); + memcpy(format, formatdelim + 1, formatlen); } format[formatlen] = 0; i = ft_search_fokv(kvs.ptr, kvs.len, ident, fobj_fokv_cmpc); diff --git a/src/fu_util/impl/fo_impl2.h b/src/fu_util/impl/fo_impl2.h index 916714997..1cac933a0 100644 --- a/src/fu_util/impl/fo_impl2.h +++ b/src/fu_util/impl/fo_impl2.h @@ -2,10 +2,6 @@ #ifndef FOBJ_OBJ_PRIV2_H #define FOBJ_OBJ_PRIV2_H -#include -#include -#include - enum fobjStrType { FOBJ_STR_SMALL = 1, FOBJ_STR_UNOWNED, @@ -57,6 +53,7 @@ fobj_getstr(fobjStr *str) { return ft_str(str->ptr.ptr, str->ptr.len); default: ft_log(FT_FATAL, "Unknown fobj_str type %d", str->type); + return ft_str(NULL, 0); } } diff --git a/src/fu_util/impl/ft_impl.c b/src/fu_util/impl/ft_impl.c index 097171e86..1897e6bec 100644 --- a/src/fu_util/impl/ft_impl.c +++ b/src/fu_util/impl/ft_impl.c @@ -1,25 +1,35 @@ /* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ -#include +#include + #include -#include -#include #include +#include +#if !defined(WIN32) || defined(__MINGW64__) || defined(__MINGW32__) #include #include -#include +#else +#define WIN32_LEAN_AND_MEAN + +#include +#include +#include +#undef small +#include +#include +#include +#undef near +#endif + #ifdef HAVE_LIBBACKTRACE #include -#else +#if defined(__MINGW32__) || defined(__MINGW64__) +#include +#endif +#elif HAVE_EXECINFO_H #include #endif -#ifdef WIN32 -#define __thread __declspec(thread) -#else #include -#endif - -#include #define FT_LOG_MAX_FILES (1<<12) @@ -100,12 +110,22 @@ ft_strlcat(char *dest, const char* src, size_t dest_size) { ft_assert(dest_null, "destination has no zero byte"); if (dest_len < dest_size-1) { size_t cpy_len = dest_size - dest_len - 1; - strncpy(dest+dest_len, src, cpy_len); + cpy_len = ft_min(cpy_len, strlen(src)); + memcpy(dest+dest_len, src, cpy_len); dest[dest_len + cpy_len] = '\0'; } return dest_len + strlen(src); } +size_t +ft_strlcpy(char *dest, 
const char* src, size_t dest_size) { + size_t cpy_len = dest_size - 1; + cpy_len = ft_min(cpy_len, strlen(src)); + memcpy(dest, src, cpy_len); + dest[cpy_len] = '\0'; + return strlen(src); +} + ft_str_t ft_vasprintf(const char *fmt, va_list args) { ft_strbuf_t buf = ft_strbuf_zero(); @@ -302,9 +322,23 @@ ft__base_log_filename(const char *file) { static struct backtrace_state * volatile ft_btstate = NULL; static pthread_once_t ft_btstate_once = PTHREAD_ONCE_INIT; + +static void +ft_backtrace_err(void *data, const char *msg, int errnum) +{ + fprintf(stderr, "ft_backtrace_err %s %d\n", msg, errnum); +} + static void ft_backtrace_init(void) { - __atomic_store_n(&ft_btstate, backtrace_create_state(NULL, 0, NULL, NULL), + const char *app = NULL; +#if defined(__MINGW32__) || defined(__MINGW64__) + static char appbuf[2048] = {0}; + /* 2048 should be enough, don't check error */ + GetModuleFileNameA(0, appbuf, sizeof(appbuf)-1); + app = appbuf; +#endif + __atomic_store_n(&ft_btstate, backtrace_create_state(app, 1, ft_backtrace_err, NULL), __ATOMIC_RELEASE); } @@ -315,9 +349,9 @@ ft_backtrace_add(void *data, uintptr_t pc, struct ft_strbuf_t *buf = data; ssize_t sz; if (filename == NULL) - return 1; - return ft_strbuf_catf(buf, "\n%s:%-4d %s", - ft__truncate_log_filename(filename), lineno, function); + return 0; + return !ft_strbuf_catf(buf, "\n\t%s:%-4d\t%s", + ft__truncate_log_filename(filename), lineno, function ? 
function : "(unknown)"); } #endif @@ -355,9 +389,9 @@ ft_default_log(enum FT_LOG_LEVEL level, ft_source_position_t srcpos, #ifdef HAVE_LIBBACKTRACE if (__atomic_load_n(&ft_btstate, __ATOMIC_ACQUIRE) == NULL) pthread_once(&ft_btstate_once, ft_backtrace_init); - - backtrace_full(ft_btstate, 1, ft_backtrace_add, NULL, &buf); -#else + if (ft_btstate) + backtrace_full(ft_btstate, 0, ft_backtrace_add, NULL, &buf); +#elif defined(HAVE_EXECINFO_H) void *backtr[32] = {0}; char **syms = NULL; int i, n; @@ -414,7 +448,15 @@ ft__log_fatal(ft_source_position_t srcpos, const char* error, const char* ft__strerror(int eno, char *buf, size_t len) { -#if !_GNU_SOURCE && (_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) +#ifndef HAVE_STRERROR_R + char *sbuf = strerror(eno); + + if (sbuf == NULL) /* can this still happen anywhere? */ + return NULL; + /* To minimize thread-unsafety hazard, copy into caller's buffer */ + ft_strlcpy(buf, sbuf, len); + return buf; +#elif !_GNU_SOURCE && (_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) int saveno = errno; int e = strerror_r(eno, buf, len); if (e != 0) { diff --git a/src/fu_util/test/CMakeLists.txt b/src/fu_util/test/CMakeLists.txt index 06f86effc..05eea86c6 100644 --- a/src/fu_util/test/CMakeLists.txt +++ b/src/fu_util/test/CMakeLists.txt @@ -1,5 +1,10 @@ cmake_minimum_required(VERSION 3.11) +add_executable(fm fm.c) +add_executable(fm1 fm.c) + +target_compile_options(fm1 PRIVATE -DFM_USE_STRICT=1) + add_executable(array array.c) target_link_libraries(array fu_utils) @@ -20,6 +25,8 @@ target_link_libraries(obj1 fu_utils) enable_testing() +add_test(NAME fm COMMAND fm) +add_test(NAME fm1 COMMAND fm1) add_test(NAME array COMMAND array) add_test(NAME bsearch COMMAND bsearch) add_test(NAME fuprintf COMMAND fuprintf) diff --git a/src/fu_util/test/obj1.c b/src/fu_util/test/obj1.c index db5f9b0b0..faa7aafa5 100644 --- a/src/fu_util/test/obj1.c +++ b/src/fu_util/test/obj1.c @@ -285,6 +285,7 @@ int main(int argc, char** argv) { 
ft_assert(fobj_streq_c(strf, "Some scary things cost > $$12.4800 $$"), "String is '%s'", $tostr(strf)); + ft_log(FT_ERROR, "and try backtrace"); logf("BEFORE EXIT"); } diff --git a/src/fu_util/test/qsort/qsort.inc.c b/src/fu_util/test/qsort/qsort.inc.c index c801ae52a..2a53ae93f 100644 --- a/src/fu_util/test/qsort/qsort.inc.c +++ b/src/fu_util/test/qsort/qsort.inc.c @@ -20,7 +20,6 @@ Engineering a sort function; Jon Bentley and M. Douglas McIlroy; Software - Practice and Experience; Vol. 23 (11), 1249-1265, 1993. */ -#include #include #include #include From 5e1dd7ee4227739479a0e83092bd1ae8899fcc86 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 15 Aug 2022 15:40:14 +0300 Subject: [PATCH 046/339] [PBCKP-245] don't include libpq-int.h There's really no need to. But doing it pulls bad dependency. --- src/backup.c | 4 +++- src/catchup.c | 4 +++- src/pg_probackup.h | 1 - src/utils/file.c | 1 + 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/backup.c b/src/backup.c index 15f1a4d1c..0363d7721 100644 --- a/src/backup.c +++ b/src/backup.c @@ -2179,6 +2179,7 @@ check_external_for_tablespaces(parray *external_list, PGconn *backup_conn) PGresult *res; int i = 0; int j = 0; + int ntups; char *tablespace_path = NULL; char *query = "SELECT pg_catalog.pg_tablespace_location(oid) " "FROM pg_catalog.pg_tablespace " @@ -2190,7 +2191,8 @@ check_external_for_tablespaces(parray *external_list, PGconn *backup_conn) if (!res) elog(ERROR, "Failed to get list of tablespaces"); - for (i = 0; i < res->ntups; i++) + ntups = PQntuples(res); + for (i = 0; i < ntups; i++) { tablespace_path = PQgetvalue(res, i, 0); Assert (strlen(tablespace_path) > 0); diff --git a/src/catchup.c b/src/catchup.c index 0f6e36b13..f91d199b3 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -230,6 +230,7 @@ catchup_check_tablespaces_existance_in_tbsmapping(PGconn *conn) { PGresult *res; int i; + int ntups; char *tablespace_path = NULL; const char *linked_path = NULL; char *query = "SELECT 
pg_catalog.pg_tablespace_location(oid) " @@ -241,7 +242,8 @@ catchup_check_tablespaces_existance_in_tbsmapping(PGconn *conn) if (!res) elog(ERROR, "Failed to get list of tablespaces"); - for (i = 0; i < res->ntups; i++) + ntups = PQntuples(res); + for (i = 0; i < ntups; i++) { tablespace_path = PQgetvalue(res, i, 0); Assert (strlen(tablespace_path) > 0); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 7ce455459..e8d1968e4 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -14,7 +14,6 @@ #include "postgres_fe.h" #include "libpq-fe.h" -#include "libpq-int.h" #include "access/xlog_internal.h" #include "utils/pg_crc.h" diff --git a/src/utils/file.c b/src/utils/file.c index 86977a19a..6e8b5e9f3 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -4,6 +4,7 @@ #include "pg_probackup.h" /* sys/stat.h must be included after pg_probackup.h (see problems with compilation for windows described in PGPRO-5750) */ #include +#include #include "file.h" #include "storage/checksum.h" From 3fa33c83dd05d6f166082d175f8a0f522009f66f Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 12 Sep 2022 16:20:03 +0300 Subject: [PATCH 047/339] [PBCKP-245] mingw: no need to define custom sleep/usleep Mingw defines usable them. 
--- src/utils/pgut.c | 14 -------------- src/utils/pgut.h | 5 ----- 2 files changed, 19 deletions(-) diff --git a/src/utils/pgut.c b/src/utils/pgut.c index f1b8da0b2..9a7b465ee 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -1061,20 +1061,6 @@ init_cancel_handler(void) SetConsoleCtrlHandler(consoleHandler, TRUE); } -int -sleep(unsigned int seconds) -{ - Sleep(seconds * 1000); - return 0; -} - -int -usleep(unsigned int usec) -{ - Sleep((usec + 999) / 1000); /* rounded up */ - return 0; -} - #undef select static int select_win32(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, const struct timeval * timeout) diff --git a/src/utils/pgut.h b/src/utils/pgut.h index 638259a3c..d8b5fec85 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -98,9 +98,4 @@ extern char *pgut_str_strip_trailing_filename(const char *filepath, const char * extern int wait_for_socket(int sock, struct timeval *timeout); extern int wait_for_sockets(int nfds, fd_set *fds, struct timeval *timeout); -#ifdef WIN32 -extern int sleep(unsigned int seconds); -extern int usleep(unsigned int usec); -#endif - #endif /* PGUT_H */ From 2fd47579a36db47c618d9824a309be02d8645e34 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Sep 2022 11:11:48 +0300 Subject: [PATCH 048/339] [PBCKP-248] a bit more accurate ifdef just small refactoring. 
--- src/archive.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/archive.c b/src/archive.c index 0ebe5e504..782ef1b21 100644 --- a/src/archive.c +++ b/src/archive.c @@ -553,17 +553,17 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, /* enable streaming compression */ if (is_compress) -#ifdef HAVE_LIBZ { +#ifdef HAVE_LIBZ pioFilter_i flt = pioGZCompressFilter(compress_level); err = pioCopy($reduce(pioWriteFlush, out), $reduce(pioRead, in), flt); - } - else #else - elog(ERROR, "Compression is requested, but not compiled it"); + elog(ERROR, "Compression is requested, but not compiled it"); #endif + } + else { err = pioCopy($reduce(pioWriteFlush, out), $reduce(pioRead, in)); From e180a360312bf4710eecd574a1169dc865080841 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Sep 2022 11:11:57 +0300 Subject: [PATCH 049/339] [PBCKP-248] Use native mingw pthread Mingw pthread "implementation" works reasonably well. There's no need to use emulation. Mingw's gcc links with winpthread.dll automatically, no need to force flags. 
--- src/archive.c | 4 +- src/checkdb.c | 3 +- src/pg_probackup.c | 1 - src/pg_probackup.h | 8 ---- src/utils/logger.c | 6 +-- src/utils/thread.c | 92 +++------------------------------------------- src/utils/thread.h | 26 +++---------- 7 files changed, 19 insertions(+), 121 deletions(-) diff --git a/src/archive.c b/src/archive.c index 782ef1b21..693270fce 100644 --- a/src/archive.c +++ b/src/archive.c @@ -279,7 +279,7 @@ push_files(void *arg) int rc; archive_push_arg *args = (archive_push_arg *) arg; - my_thread_num = args->thread_num; + set_my_thread_num(args->thread_num); for (i = 0; i < parray_num(args->files); i++) { @@ -1011,7 +1011,7 @@ get_files(void *arg) char from_fullpath[MAXPGPATH]; archive_get_arg *args = (archive_get_arg *) arg; - my_thread_num = args->thread_num; + set_my_thread_num(args->thread_num); for (i = 0; i < parray_num(args->files); i++) { diff --git a/src/checkdb.c b/src/checkdb.c index 177fc3cc7..f344d29b4 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -293,7 +293,8 @@ check_indexes(void *arg) int i; check_indexes_arg *arguments = (check_indexes_arg *) arg; int n_indexes = 0; - my_thread_num = arguments->thread_num; + + set_my_thread_num(arguments->thread_num); if (arguments->index_list) n_indexes = parray_num(arguments->index_list); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index b7308405c..a580fb3c2 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -75,7 +75,6 @@ bool no_color = false; bool show_color = true; bool is_archive_cmd = false; pid_t my_pid = 0; -__thread int my_thread_num = 1; bool progress = false; bool no_sync = false; time_t start_time = 0; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index e8d1968e4..13d4b06f0 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -43,13 +43,6 @@ #include "pg_probackup_state.h" - -#ifdef WIN32 -#define __thread __declspec(thread) -#else -#include -#endif - /* Wrap the code that we're going to delete after refactoring in this define*/ #define REFACTORE_ME 
@@ -759,7 +752,6 @@ typedef struct StopBackupCallbackParams /* common options */ extern pid_t my_pid; -extern __thread int my_thread_num; extern int num_threads; extern bool stream_wal; extern bool show_color; diff --git a/src/utils/logger.c b/src/utils/logger.c index e58802e28..e49012368 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -344,7 +344,7 @@ elog_internal(int elevel, bool file_only, const char *message) if (format_file == JSON || format_console == JSON) { snprintf(str_pid_json, sizeof(str_pid_json), "%d", my_pid); - snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num); + snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num()); initPQExpBuffer(&show_buf); json_add_min(buf_json, JT_BEGIN_OBJECT); @@ -424,7 +424,7 @@ elog_internal(int elevel, bool file_only, const char *message) { char str_thread[64]; /* [Issue #213] fix pgbadger parsing */ - snprintf(str_thread, sizeof(str_thread), "[%d-1]:", my_thread_num); + snprintf(str_thread, sizeof(str_thread), "[%d-1]:", my_thread_num()); fprintf(stderr, "%s ", strfbuf); fprintf(stderr, "%s ", str_pid); @@ -498,7 +498,7 @@ elog_stderr(int elevel, const char *fmt, ...) 
strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", localtime(&log_time)); snprintf(str_pid, sizeof(str_pid), "%d", my_pid); - snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num); + snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num()); initPQExpBuffer(&show_buf); json_add_min(buf_json, JT_BEGIN_OBJECT); diff --git a/src/utils/thread.c b/src/utils/thread.c index 1c469bd29..1ad1e772e 100644 --- a/src/utils/thread.c +++ b/src/utils/thread.c @@ -17,97 +17,17 @@ */ bool thread_interrupted = false; -#ifdef WIN32 -DWORD main_tid = 0; -#else pthread_t main_tid = 0; -#endif -#ifdef WIN32 -#include - -typedef struct win32_pthread -{ - HANDLE handle; - void *(*routine) (void *); - void *arg; - void *result; -} win32_pthread; - -static long mutex_initlock = 0; - -static unsigned __stdcall -win32_pthread_run(void *arg) -{ - win32_pthread *th = (win32_pthread *)arg; - - th->result = th->routine(th->arg); - - return 0; -} - -int -pthread_create(pthread_t *thread, - pthread_attr_t *attr, - void *(*start_routine) (void *), - void *arg) -{ - int save_errno; - win32_pthread *th; - - th = (win32_pthread *)pg_malloc(sizeof(win32_pthread)); - th->routine = start_routine; - th->arg = arg; - th->result = NULL; - - th->handle = (HANDLE)_beginthreadex(NULL, 0, win32_pthread_run, th, 0, NULL); - if (th->handle == NULL) - { - save_errno = errno; - free(th); - return save_errno; - } - - *thread = th; - return 0; -} +static __thread int my_thread_num_var = 1; int -pthread_join(pthread_t th, void **thread_return) +my_thread_num(void) { - if (th == NULL || th->handle == NULL) - return errno = EINVAL; - - if (WaitForSingleObject(th->handle, INFINITE) != WAIT_OBJECT_0) - { - _dosmaperr(GetLastError()); - return errno; - } - - if (thread_return) - *thread_return = th->result; - - CloseHandle(th->handle); - free(th); - return 0; + return my_thread_num_var; } -#endif /* WIN32 */ - -int -pthread_lock(pthread_mutex_t *mp) +void 
+set_my_thread_num(int th) { -#ifdef WIN32 - if (*mp == NULL) - { - while (InterlockedExchange(&mutex_initlock, 1) == 1) - /* loop, another thread own the lock */ ; - if (*mp == NULL) - { - if (pthread_mutex_init(mp, NULL)) - return -1; - } - InterlockedExchange(&mutex_initlock, 0); - } -#endif - return pthread_mutex_lock(mp); + my_thread_num_var = th; } diff --git a/src/utils/thread.h b/src/utils/thread.h index 2eaa5fb45..a6c58f70e 100644 --- a/src/utils/thread.h +++ b/src/utils/thread.h @@ -10,32 +10,18 @@ #ifndef PROBACKUP_THREAD_H #define PROBACKUP_THREAD_H -#ifdef WIN32 -#include "postgres_fe.h" -#include "port/pthread-win32.h" - -/* Use native win32 threads on Windows */ -typedef struct win32_pthread *pthread_t; -typedef int pthread_attr_t; - -#define PTHREAD_MUTEX_INITIALIZER NULL //{ NULL, 0 } -#define PTHREAD_ONCE_INIT false +#if defined(WIN32) && !(defined(__MINGW64__) || defined(__MINGW32__) || defined(HAVE_PTHREAD)) +#error "Windows build supports only 'pthread' threading" +#endif -extern int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg); -extern int pthread_join(pthread_t th, void **thread_return); -#else /* Use platform-dependent pthread capability */ #include -#endif - -#ifdef WIN32 -extern DWORD main_tid; -#else extern pthread_t main_tid; -#endif +#define pthread_lock(mp) pthread_mutex_lock(mp) extern bool thread_interrupted; -extern int pthread_lock(pthread_mutex_t *mp); +int my_thread_num(void); +void set_my_thread_num(int); #endif /* PROBACKUP_THREAD_H */ From 3acd1a72b421169ce316d0ed9301b94f557a6f14 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Sep 2022 13:21:13 +0300 Subject: [PATCH 050/339] [PBCKP-245] don't include sys/stat.h so often Postgresql's 'port' library defines custom 'stat' and 'struct stat'. It conflicts with system one in MinGW. We had to include either one or another, but not both. 
It is easier to do if we include 'sys/stat.h' only once and only in non-win32 environment. --- src/backup.c | 1 - src/catalog.c | 1 - src/catchup.c | 1 - src/checkdb.c | 1 - src/data.c | 1 - src/dir.c | 1 - src/fetch.c | 1 - src/init.c | 1 - src/merge.c | 1 - src/pg_probackup.c | 2 -- src/restore.c | 1 - src/show.c | 1 - src/utils/file.c | 2 -- src/utils/file.h | 6 +++++- src/validate.c | 1 - 15 files changed, 5 insertions(+), 17 deletions(-) diff --git a/src/backup.c b/src/backup.c index 0363d7721..f28ff1abc 100644 --- a/src/backup.c +++ b/src/backup.c @@ -17,7 +17,6 @@ #include "pgtar.h" #include "streamutil.h" -#include #include #include diff --git a/src/catalog.c b/src/catalog.c index b4be159d1..212add4ca 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -13,7 +13,6 @@ #include #include -#include #include #include "utils/file.h" diff --git a/src/catchup.c b/src/catchup.c index f91d199b3..fa126b884 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -17,7 +17,6 @@ #include "pgtar.h" #include "streamutil.h" -#include #include #include diff --git a/src/checkdb.c b/src/checkdb.c index f344d29b4..f1a5fcf78 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -16,7 +16,6 @@ #include "pg_probackup.h" -#include #include #include diff --git a/src/data.c b/src/data.c index 17ae4b91a..6308b110c 100644 --- a/src/data.c +++ b/src/data.c @@ -16,7 +16,6 @@ #include "utils/file.h" #include -#include #ifdef HAVE_LIBZ #include diff --git a/src/dir.c b/src/dir.c index 0bcd60169..8704a8d2f 100644 --- a/src/dir.c +++ b/src/dir.c @@ -19,7 +19,6 @@ #include "catalog/pg_tablespace.h" #include -#include #include #include "utils/configuration.h" diff --git a/src/fetch.c b/src/fetch.c index bbea7bffe..980bf531b 100644 --- a/src/fetch.c +++ b/src/fetch.c @@ -10,7 +10,6 @@ #include "pg_probackup.h" -#include #include /* diff --git a/src/init.c b/src/init.c index 41ee2e3c9..511256aa3 100644 --- a/src/init.c +++ b/src/init.c @@ -11,7 +11,6 @@ #include "pg_probackup.h" #include -#include /* * 
Initialize backup catalog. diff --git a/src/merge.c b/src/merge.c index 03698e92d..62ce3c300 100644 --- a/src/merge.c +++ b/src/merge.c @@ -9,7 +9,6 @@ #include "pg_probackup.h" -#include #include #include "utils/thread.h" diff --git a/src/pg_probackup.c b/src/pg_probackup.c index a580fb3c2..dda5cf65a 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -47,8 +47,6 @@ #include "streamutil.h" #include "utils/file.h" -#include - #include "utils/configuration.h" #include "utils/thread.h" #include diff --git a/src/restore.c b/src/restore.c index 28a79f1ed..7b37b2306 100644 --- a/src/restore.c +++ b/src/restore.c @@ -12,7 +12,6 @@ #include "access/timeline.h" -#include #include #include "utils/thread.h" diff --git a/src/show.c b/src/show.c index db8a9e225..46002198d 100644 --- a/src/show.c +++ b/src/show.c @@ -13,7 +13,6 @@ #include #include #include -#include #include "utils/json.h" diff --git a/src/utils/file.c b/src/utils/file.c index 6e8b5e9f3..d4282b8fc 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2,8 +2,6 @@ #include #include "pg_probackup.h" -/* sys/stat.h must be included after pg_probackup.h (see problems with compilation for windows described in PGPRO-5750) */ -#include #include #include "file.h" diff --git a/src/utils/file.h b/src/utils/file.h index 7fd1e7919..79e86ee20 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -3,7 +3,9 @@ #include "storage/bufpage.h" #include +#ifndef WIN32 #include +#endif #include #ifdef HAVE_LIBZ @@ -223,11 +225,13 @@ fobj_iface(pioWriteFlush); fobj_iface(pioWriteCloser); fobj_iface(pioReadCloser); +typedef struct stat stat_t; + // Drive #define mth__pioOpen pioFile_i, (path_t, path), (int, flags), \ (int, permissions), (err_i *, err) #define mth__pioOpen__optional() (permissions, FILE_PERMISSION) -#define mth__pioStat struct stat, (path_t, path), (bool, follow_symlink), \ +#define mth__pioStat stat_t, (path_t, path), (bool, follow_symlink), \ (err_i *, err) #define mth__pioRemove err_i, (path_t, path), 
(bool, missing_ok) #define mth__pioRename err_i, (path_t, old_path), (path_t, new_path) diff --git a/src/validate.c b/src/validate.c index 79a450ac8..8e402a1c5 100644 --- a/src/validate.c +++ b/src/validate.c @@ -10,7 +10,6 @@ #include "pg_probackup.h" -#include #include #include "utils/thread.h" From ca6e3942a5ad6795fadc5da444000b341b9ee824 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Sep 2022 13:14:29 +0300 Subject: [PATCH 051/339] [PBCKP-245] make Makefile more portable It is hard to consider all 'echo in makefile shell' variants. Mingw's one doesn't process escape sequence. That is why it is better to use raw TAB symbol. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 21553f97c..2c126af29 100644 --- a/Makefile +++ b/Makefile @@ -97,7 +97,7 @@ borrowed.mk: $(firstword $(MAKEFILE_LIST)) $(file >$@,# This file is autogenerated. Do not edit!) $(foreach borrowed_file, $(BORROWED_H_SRC) $(BORROWED_C_SRC), \ $(file >>$@,$(addprefix $(BORROW_DIR)/, $(notdir $(borrowed_file))): | $(CURDIR)/$(BORROW_DIR)/ $(realpath $(top_srcdir)/$(borrowed_file))) \ - $(file >>$@,$(shell echo "\t"'$$(LN_S) $(realpath $(top_srcdir)/$(borrowed_file)) $$@')) \ + $(file >>$@,$(shell echo " "'$$(LN_S) $(realpath $(top_srcdir)/$(borrowed_file)) $$@')) \ ) include borrowed.mk From 623ddbb6a3d4bdfc052bbab3e24b4fa2403e619e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 30 Sep 2022 11:06:16 +0300 Subject: [PATCH 052/339] [PBCKP-146] Small fix for remote_agent --- src/pg_probackup.c | 3 ++- src/pg_probackup.h | 2 +- src/utils/configuration.c | 1 - 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index b7308405c..abddf7fc1 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -86,7 +86,7 @@ bool perm_slot = false; /* backup options */ bool backup_logs = false; bool smooth_checkpoint; -char *remote_agent; +bool remote_agent = false; static char *backup_note = NULL; 
/* catchup options */ static char *catchup_source_pgdata = NULL; @@ -361,6 +361,7 @@ main(int argc, char *argv[]) elog(ERROR, "Version mismatch, pg_probackup binary with version '%s' " "is launched as an agent for pg_probackup binary with version '%s'", PROGRAM_VERSION, argv[2]); + remote_agent = true; fio_communicate(STDIN_FILENO, STDOUT_FILENO); return 0; case HELP_CMD: diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 7ce455459..d51fa6c17 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -774,7 +774,7 @@ extern bool perm_slot; extern bool smooth_checkpoint; /* remote probackup options */ -extern char* remote_agent; +extern bool remote_agent; /* delete options */ extern bool delete_wal; diff --git a/src/utils/configuration.c b/src/utils/configuration.c index a1710e3dd..5e3efff4c 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -530,7 +530,6 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[], opt = option_find(c, options); if (opt - && !remote_agent && opt->allowed < SOURCE_CMD && opt->allowed != SOURCE_CMD_STRICT) elog(ERROR, "Option %s cannot be specified in command line", opt->lname); From 085b99fc7c09cbf59cbddd2e485e8eebfe00277d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Sep 2022 11:18:39 +0300 Subject: [PATCH 053/339] [PBCKP-245] more mingw compatibility - pid_t is 64bit in mingw. Lets cast it to long long in most places on I/O. - int64 should be casted to long long as well - size_t should be printed as %zu/%zd - stat functino is imported from pgport, and there's no need to redefine it again. - no need to redeclare `__thread` since gcc work with it well. - arguments and types in launch agent. 
--- src/archive.c | 2 +- src/catalog.c | 36 ++++++++++++++--------------- src/catchup.c | 14 ++++++------ src/data.c | 28 +++++++++++------------ src/merge.c | 2 +- src/restore.c | 16 ++++++------- src/show.c | 8 +++---- src/utils/file.c | 56 ++++++++++------------------------------------ src/utils/logger.c | 8 +++---- src/utils/remote.c | 18 ++++++--------- src/validate.c | 4 ++-- 11 files changed, 78 insertions(+), 114 deletions(-) diff --git a/src/archive.c b/src/archive.c index 693270fce..a930ff557 100644 --- a/src/archive.c +++ b/src/archive.c @@ -148,7 +148,7 @@ do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *pg n_threads = parray_num(batch_files); elog(INFO, "pg_probackup archive-push WAL file: %s, " - "threads: %i/%i, batch: %lu/%i, compression: %s", + "threads: %i/%i, batch: %zu/%i, compression: %s", wal_file_name, n_threads, num_threads, parray_num(batch_files), batch_size, is_compress ? "zlib" : "none"); diff --git a/src/catalog.c b/src/catalog.c index 212add4ca..5c44a9940 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -422,8 +422,8 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) /* complain every fifth interval */ if ((ntries % LOG_FREQ) == 0) { - elog(WARNING, "Process %d is using backup %s, and is still running", - encoded_pid, backup_id); + elog(WARNING, "Process %lld is using backup %s, and is still running", + (long long)encoded_pid, backup_id); elog(WARNING, "Waiting %u seconds on exclusive lock for backup %s", ntries, backup_id); @@ -437,8 +437,8 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) else { if (errno == ESRCH) - elog(WARNING, "Process %d which used backup %s no longer exists", - encoded_pid, backup_id); + elog(WARNING, "Process %lld which used backup %s no longer exists", + (long long)encoded_pid, backup_id); else elog(ERROR, "Failed to send signal 0 to a process %d: %s", encoded_pid, strerror(errno)); @@ -467,7 +467,7 @@ 
grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) /* * Successfully created the file, now fill it. */ - snprintf(buffer, sizeof(buffer), "%d\n", my_pid); + snprintf(buffer, sizeof(buffer), "%lld\n", (long long)my_pid); errno = 0; if (fio_write(fd, buffer, strlen(buffer)) != strlen(buffer)) @@ -574,8 +574,8 @@ wait_shared_owners(pgBackup *backup) /* complain from time to time */ if ((ntries % LOG_FREQ) == 0) { - elog(WARNING, "Process %d is using backup %s in shared mode, and is still running", - encoded_pid, base36enc(backup->start_time)); + elog(WARNING, "Process %lld is using backup %s in shared mode, and is still running", + (long long)encoded_pid, base36enc(backup->start_time)); elog(WARNING, "Waiting %u seconds on lock for backup %s", ntries, base36enc(backup->start_time)); @@ -587,8 +587,8 @@ wait_shared_owners(pgBackup *backup) continue; } else if (errno != ESRCH) - elog(ERROR, "Failed to send signal 0 to a process %d: %s", - encoded_pid, strerror(errno)); + elog(ERROR, "Failed to send signal 0 to a process %lld: %s", + (long long)encoded_pid, strerror(errno)); /* locker is dead */ break; @@ -605,8 +605,8 @@ wait_shared_owners(pgBackup *backup) /* some shared owners are still alive */ if (ntries <= 0) { - elog(WARNING, "Cannot to lock backup %s in exclusive mode, because process %u owns shared lock", - base36enc(backup->start_time), encoded_pid); + elog(WARNING, "Cannot to lock backup %s in exclusive mode, because process %llu owns shared lock", + base36enc(backup->start_time), (long long)encoded_pid); return 1; } @@ -661,11 +661,11 @@ grab_shared_lock_file(pgBackup *backup) * Somebody is still using this backup in shared mode, * copy this pid into a new file. 
*/ - buffer_len += snprintf(buffer+buffer_len, 4096, "%u\n", encoded_pid); + buffer_len += snprintf(buffer+buffer_len, 4096, "%llu\n", (long long)encoded_pid); } else if (errno != ESRCH) - elog(ERROR, "Failed to send signal 0 to a process %d: %s", - encoded_pid, strerror(errno)); + elog(ERROR, "Failed to send signal 0 to a process %lld: %s", + (long long)encoded_pid, strerror(errno)); } if (fp_in) @@ -685,7 +685,7 @@ grab_shared_lock_file(pgBackup *backup) } /* add my own pid */ - buffer_len += snprintf(buffer+buffer_len, sizeof(buffer), "%u\n", my_pid); + buffer_len += snprintf(buffer+buffer_len, sizeof(buffer), "%llu\n", (long long)my_pid); /* write out the collected PIDs to temp lock file */ fwrite(buffer, 1, buffer_len, fp_out); @@ -783,11 +783,11 @@ release_shared_lock_file(const char *backup_dir) * Somebody is still using this backup in shared mode, * copy this pid into a new file. */ - buffer_len += snprintf(buffer+buffer_len, 4096, "%u\n", encoded_pid); + buffer_len += snprintf(buffer+buffer_len, 4096, "%llu\n", (long long)encoded_pid); } else if (errno != ESRCH) - elog(ERROR, "Failed to send signal 0 to a process %d: %s", - encoded_pid, strerror(errno)); + elog(ERROR, "Failed to send signal 0 to a process %lld: %s", + (long long)encoded_pid, strerror(errno)); } if (ferror(fp_in)) diff --git a/src/catchup.c b/src/catchup.c index fa126b884..08bc039f9 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -137,8 +137,8 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, } else if (pid > 1) /* postmaster is up */ { - elog(ERROR, "Postmaster with pid %u is running in destination directory \"%s\"", - pid, dest_pgdata); + elog(ERROR, "Postmaster with pid %lld is running in destination directory \"%s\"", + (long long)pid, dest_pgdata); } } @@ -160,15 +160,15 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, source_id = get_system_identifier(FIO_DB_HOST, source_pgdata, false); /* same as 
instance_config.system_identifier */ if (source_conn_id != source_id) - elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", - source_conn_id, source_pgdata, source_id); + elog(ERROR, "Database identifiers mismatch: we connected to DB id %llu, but in \"%s\" we found id %llu", + (long long)source_conn_id, source_pgdata, (long long)source_id); if (current.backup_mode != BACKUP_MODE_FULL) { dest_id = get_system_identifier(FIO_LOCAL_HOST, dest_pgdata, false); if (source_conn_id != dest_id) - elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", - source_conn_id, dest_pgdata, dest_id); + elog(ERROR, "Database identifiers mismatch: we connected to DB id %llu, but in \"%s\" we found id %llu", + (long long)source_conn_id, dest_pgdata, (long long)dest_id); } } @@ -439,7 +439,7 @@ catchup_thread_runner(void *arg) if (file->write_size == BYTES_INVALID) { - elog(LOG, "Skipping the unchanged file: \"%s\", read %li bytes", from_fullpath, file->read_size); + elog(LOG, "Skipping the unchanged file: \"%s\", read %zu bytes", from_fullpath, file->read_size); continue; } diff --git a/src/data.c b/src/data.c index 6308b110c..fdc7a8918 100644 --- a/src/data.c +++ b/src/data.c @@ -204,12 +204,12 @@ get_header_errormsg(Page page, char **errormsg) if (PageGetPageSize(phdr) != BLCKSZ) snprintf(*errormsg, ERRMSG_MAX_LEN, "page header invalid, " - "page size %lu is not equal to block size %u", + "page size %zu is not equal to block size %u", PageGetPageSize(phdr), BLCKSZ); else if (phdr->pd_lower < SizeOfPageHeaderData) snprintf(*errormsg, ERRMSG_MAX_LEN, "page header invalid, " - "pd_lower %i is less than page header size %lu", + "pd_lower %i is less than page header size %zu", phdr->pd_lower, SizeOfPageHeaderData); else if (phdr->pd_lower > phdr->pd_upper) @@ -229,7 +229,7 @@ get_header_errormsg(Page page, char **errormsg) else if (phdr->pd_special != MAXALIGN(phdr->pd_special)) 
snprintf(*errormsg, ERRMSG_MAX_LEN, "page header invalid, " - "pd_special %i is misaligned, expected %lu", + "pd_special %i is misaligned, expected %zu", phdr->pd_special, MAXALIGN(phdr->pd_special)); else if (phdr->pd_flags & ~PD_VALID_FLAG_BITS) @@ -1196,7 +1196,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers datapagemap_add(map, blknum); } - elog(LOG, "Copied file \"%s\": %lu bytes", from_fullpath, write_len); + elog(LOG, "Copied file \"%s\": %zu bytes", from_fullpath, write_len); return write_len; } @@ -1240,7 +1240,7 @@ restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file, pg_free(buf); - elog(LOG, "Copied file \"%s\": %lu bytes", from_fullpath, file->write_size); + elog(LOG, "Copied file \"%s\": %llu bytes", from_fullpath, (long long)file->write_size); } size_t @@ -1317,9 +1317,9 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, elog(ERROR, "Failed to locate a full copy of non-data file \"%s\"", to_fullpath); if (tmp_file->write_size <= 0) - elog(ERROR, "Full copy of non-data file has invalid size: %li. " + elog(ERROR, "Full copy of non-data file has invalid size: %lli. 
" "Metadata corruption in backup %s in file: \"%s\"", - tmp_file->write_size, base36enc(tmp_backup->start_time), + (long long)tmp_file->write_size, base36enc(tmp_backup->start_time), to_fullpath); /* incremental restore */ @@ -2031,11 +2031,11 @@ get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph, return false; /* EOF found */ else if (read_len != 0 && feof(in)) elog(ERROR, - "Odd size page found at offset %ld of \"%s\"", - ftello(in), fullpath); + "Odd size page found at offset %lld of \"%s\"", + (long long)ftello(in), fullpath); else - elog(ERROR, "Cannot read header at offset %ld of \"%s\": %s", - ftello(in), fullpath, strerror(errno)); + elog(ERROR, "Cannot read header at offset %lld of \"%s\": %s", + (long long)ftello(in), fullpath, strerror(errno)); } /* In older versions < 2.4.0, when crc for file was calculated, header was @@ -2335,8 +2335,8 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, to_fullpath, strerror(errno)); if (ftruncate(fileno(out), file->size) == -1) - elog(ERROR, "Cannot ftruncate file \"%s\" to size %lu: %s", - to_fullpath, file->size, strerror(errno)); + elog(ERROR, "Cannot ftruncate file \"%s\" to size %llu: %s", + to_fullpath, (long long)file->size, strerror(errno)); } } @@ -2443,7 +2443,7 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b if (hdr_crc != file->hdr_crc) { elog(strict ? ERROR : WARNING, "Header map for file \"%s\" crc mismatch \"%s\" " - "offset: %llu, len: %lu, current: %u, expected: %u", + "offset: %llu, len: %zu, current: %u, expected: %u", file->rel_path, hdr_map->path, file->hdr_off, read_len, hdr_crc, file->hdr_crc); goto cleanup; } diff --git a/src/merge.c b/src/merge.c index 62ce3c300..f64b72611 100644 --- a/src/merge.c +++ b/src/merge.c @@ -957,7 +957,7 @@ merge_files(void *arg) if (S_ISDIR(dest_file->mode)) goto done; - elog(progress ? INFO : LOG, "Progress: (%d/%lu). Merging file \"%s\"", + elog(progress ? INFO : LOG, "Progress: (%d/%zu). 
Merging file \"%s\"", i + 1, n_files, dest_file->rel_path); if (dest_file->is_datafile && !dest_file->is_cfs) diff --git a/src/restore.c b/src/restore.c index 7b37b2306..ce0604b0a 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1104,14 +1104,14 @@ static void * restore_files(void *arg) { int i; - uint64 n_files; + size_t n_files; char to_fullpath[MAXPGPATH]; FILE *out = NULL; char *out_buf = pgut_malloc(STDIO_BUFSIZE); restore_files_arg *arguments = (restore_files_arg *) arg; - n_files = (unsigned long) parray_num(arguments->dest_files); + n_files = parray_num(arguments->dest_files); for (i = 0; i < parray_num(arguments->dest_files); i++) { @@ -1132,7 +1132,7 @@ restore_files(void *arg) if (interrupted || thread_interrupted) elog(ERROR, "Interrupted during restore"); - elog(progress ? INFO : LOG, "Progress: (%d/%lu). Restore file \"%s\"", + elog(progress ? INFO : LOG, "Progress: (%d/%zu). Restore file \"%s\"", i + 1, n_files, dest_file->rel_path); /* Only files from pgdata can be skipped by partial restore */ @@ -2173,8 +2173,8 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, } else if (pid > 1) /* postmaster is up */ { - elog(WARNING, "Postmaster with pid %u is running in destination directory \"%s\"", - pid, pgdata); + elog(WARNING, "Postmaster with pid %llu is running in destination directory \"%s\"", + (long long)pid, pgdata); success = false; postmaster_is_up = true; } @@ -2197,9 +2197,9 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, if (system_id_pgdata == instance_config.system_identifier) system_id_match = true; else - elog(WARNING, "Backup catalog was initialized for system id %lu, " - "but destination directory system id is %lu", - system_identifier, system_id_pgdata); + elog(WARNING, "Backup catalog was initialized for system id %llu, " + "but destination directory system id is %llu", + (long long)system_identifier, (long long)system_id_pgdata); /* * TODO: maybe there should be some 
other signs, pointing to pg_control diff --git a/src/show.c b/src/show.c index 46002198d..5440e28a2 100644 --- a/src/show.c +++ b/src/show.c @@ -910,7 +910,7 @@ show_archive_plain(const char *instance_name, uint32 xlog_seg_size, cur++; /* N files */ - snprintf(row->n_segments, lengthof(row->n_segments), "%lu", + snprintf(row->n_segments, lengthof(row->n_segments), "%zu", tlinfo->n_xlog_files); widths[cur] = Max(widths[cur], strlen(row->n_segments)); cur++; @@ -930,7 +930,7 @@ show_archive_plain(const char *instance_name, uint32 xlog_seg_size, cur++; /* N backups */ - snprintf(row->n_backups, lengthof(row->n_backups), "%lu", + snprintf(row->n_backups, lengthof(row->n_backups), "%zu", tlinfo->backups?parray_num(tlinfo->backups):0); widths[cur] = Max(widths[cur], strlen(row->n_backups)); cur++; @@ -1086,10 +1086,10 @@ show_archive_json(const char *instance_name, uint32 xlog_seg_size, json_add_value(buf, "max-segno", tmp_buf, json_level, true); json_add_key(buf, "n-segments", json_level); - appendPQExpBuffer(buf, "%lu", tlinfo->n_xlog_files); + appendPQExpBuffer(buf, "%zu", tlinfo->n_xlog_files); json_add_key(buf, "size", json_level); - appendPQExpBuffer(buf, "%lu", tlinfo->size); + appendPQExpBuffer(buf, "%zu", tlinfo->size); json_add_key(buf, "zratio", json_level); diff --git a/src/utils/file.c b/src/utils/file.c index d4282b8fc..fc97ab810 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -77,8 +77,8 @@ typedef struct #define fio_fileno(f) (((size_t)f - 1) | FIO_PIPE_MARKER) #if defined(WIN32) -#undef open(a, b, c) -#undef fopen(a, b) +#undef open +#undef fopen #endif void @@ -138,43 +138,6 @@ fio_is_remote_fd(int fd) return (fd & FIO_PIPE_MARKER) != 0; } -#ifdef WIN32 - -#undef stat - -/* - * The stat() function in win32 is not guaranteed to update the st_size - * field when run. So we define our own version that uses the Win32 API - * to update this field. 
- */ -static int -fio_safestat(const char *path, struct stat *buf) -{ - int r; - WIN32_FILE_ATTRIBUTE_DATA attr; - - r = stat(path, buf); - if (r < 0) - return r; - - if (!GetFileAttributesEx(path, GetFileExInfoStandard, &attr)) - { - errno = ENOENT; - return -1; - } - - /* - * XXX no support for large files here, but we don't do that in general on - * Win32 yet. - */ - buf->st_size = attr.nFileSizeLow; - - return 0; -} - -#define stat(x, y) fio_safestat(x, y) -#endif /* WIN32 */ - #ifdef WIN32 /* TODO: use real pread on Linux */ static ssize_t @@ -2202,10 +2165,10 @@ fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath, strerror(errno)); - elog(VERBOSE, "ftruncate file \"%s\" to size %lu", + elog(VERBOSE, "ftruncate file \"%s\" to size %zu", to_fullpath, file->size); if (fio_ftruncate(out, file->size) == -1) - elog(ERROR, "Cannot ftruncate file \"%s\" to size %lu: %s", + elog(ERROR, "Cannot ftruncate file \"%s\" to size %zu: %s", to_fullpath, file->size, strerror(errno)); if (!fio_is_remote_file(out)) @@ -3099,6 +3062,7 @@ local_check_postmaster(const char *pgdata) { FILE *fp; pid_t pid; + long long lpid; char pid_file[MAXPGPATH]; join_path_components(pid_file, pgdata, "postmaster.pid"); @@ -3114,7 +3078,11 @@ local_check_postmaster(const char *pgdata) pid_file, strerror(errno)); } - if (fscanf(fp, "%i", &pid) != 1) + if (fscanf(fp, "%lli", &lpid) == 1) + { + pid = lpid; + } + else { /* something is wrong with the file content */ pid = 1; @@ -3128,8 +3096,8 @@ local_check_postmaster(const char *pgdata) if (errno == ESRCH) pid = 0; else - elog(ERROR, "Failed to send signal 0 to a process %d: %s", - pid, strerror(errno)); + elog(ERROR, "Failed to send signal 0 to a process %lld: %s", + (long long)pid, strerror(errno)); } } diff --git a/src/utils/logger.c b/src/utils/logger.c index e49012368..ec9194ec0 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -343,7 +343,7 @@ 
elog_internal(int elevel, bool file_only, const char *message) if (format_file == JSON || format_console == JSON) { - snprintf(str_pid_json, sizeof(str_pid_json), "%d", my_pid); + snprintf(str_pid_json, sizeof(str_pid_json), "%lld", (long long)my_pid); snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num()); initPQExpBuffer(&show_buf); @@ -357,7 +357,7 @@ elog_internal(int elevel, bool file_only, const char *message) json_add_min(buf_json, JT_END_OBJECT); } - snprintf(str_pid, sizeof(str_pid), "[%d]:", my_pid); + snprintf(str_pid, sizeof(str_pid), "[%lld]:", (long long)my_pid); /* * Write message to log file. @@ -497,7 +497,7 @@ elog_stderr(int elevel, const char *fmt, ...) { strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", localtime(&log_time)); - snprintf(str_pid, sizeof(str_pid), "%d", my_pid); + snprintf(str_pid, sizeof(str_pid), "%lld", (long long)my_pid); snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num()); initPQExpBuffer(&show_buf); @@ -971,7 +971,7 @@ open_logfile(FILE **file, const char *filename_format) elog_stderr(ERROR, "cannot open rotation file \"%s\": %s", control, strerror(errno)); - fprintf(control_file, "%ld", timestamp); + fprintf(control_file, "%lld", (long long)timestamp); fclose(control_file); } diff --git a/src/utils/remote.c b/src/utils/remote.c index 3286052a5..bceccc26a 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -5,12 +5,6 @@ #include #include -#ifdef WIN32 -#define __thread __declspec(thread) -#else -#include -#endif - #include "pg_probackup.h" #include "file.h" @@ -113,14 +107,14 @@ bool launch_agent(void) char cmd[MAX_CMDLINE_LENGTH]; char* ssh_argv[MAX_CMDLINE_OPTIONS]; int ssh_argc; - int outfd[2]; - int infd[2]; - int errfd[2]; + int outfd[2] = {0, 0}; + int infd[2] = {0, 0}; + int errfd[2] = {0, 0}; int agent_version; ssh_argc = 0; #ifdef WIN32 - ssh_argv[ssh_argc++] = PROGRAM_NAME_FULL; + ssh_argv[ssh_argc++] = (char *) PROGRAM_NAME_FULL; 
ssh_argv[ssh_argc++] = "ssh"; ssh_argc += 2; /* reserve space for pipe descriptors */ #endif @@ -198,7 +192,9 @@ bool launch_agent(void) ssh_argv[2] = psprintf("%d", outfd[0]); ssh_argv[3] = psprintf("%d", infd[1]); { - intptr_t pid = _spawnvp(_P_NOWAIT, ssh_argv[0], ssh_argv); + intptr_t pid = _spawnvp(_P_NOWAIT, + (const char*)ssh_argv[0], + (const char * const *) ssh_argv); if (pid < 0) return false; child_pid = GetProcessId((HANDLE)pid); diff --git a/src/validate.c b/src/validate.c index 8e402a1c5..f2b99e3a9 100644 --- a/src/validate.c +++ b/src/validate.c @@ -312,8 +312,8 @@ pgBackupValidateFiles(void *arg) if (file->write_size != st.st_size) { - elog(WARNING, "Invalid size of backup file \"%s\" : " INT64_FORMAT ". Expected %lu", - file_fullpath, (unsigned long) st.st_size, file->write_size); + elog(WARNING, "Invalid size of backup file \"%s\" : %lld. Expected %lld", + file_fullpath, (long long) st.st_size, (long long)file->write_size); arguments->corrupted = true; break; } From 0e85c52a983e62276b9ab985a17da2d42a7d562a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 19 Sep 2022 21:47:38 +0300 Subject: [PATCH 054/339] [PBCKP-245] mingw: ucrt strftime is good enough msvcrt's strftime is quite limited, and we had to use pg_strftime. On the other hand, ucrt's one is capable for most of SU (Single UNIX) extensions, so we could safely use it. That means, we restrict windows port to MinGW64 UCRT environment. 
--- src/pg_probackup.h | 4 ++++ src/utils/logger.c | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 13d4b06f0..dc7effd9d 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -43,6 +43,10 @@ #include "pg_probackup_state.h" +#if defined(WIN32) && !(defined(_UCRT) && defined(__MINGW64__)) +#error Windows port requires compilation in MinGW64 UCRT environment +#endif + /* Wrap the code that we're going to delete after refactoring in this define*/ #define REFACTORE_ME diff --git a/src/utils/logger.c b/src/utils/logger.c index ec9194ec0..57b96e020 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -811,11 +811,7 @@ logfile_getname(const char *format, time_t timestamp) len = strlen(filename); /* Treat log_filename as a strftime pattern */ -#ifdef WIN32 - if (pg_strftime(filename + len, MAXPGPATH - len, format, tm) <= 0) -#else if (strftime(filename + len, MAXPGPATH - len, format, tm) <= 0) -#endif elog_stderr(ERROR, "strftime(%s) failed: %s", format, strerror(errno)); return filename; From 0e0027bd37adbf87d70473bc45442f009003b908 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Sep 2022 14:15:04 +0300 Subject: [PATCH 055/339] [PBCKP-245] fix EACCES usage I can't find where EACCESS is used in postgres. Nor I can find where it is defined in Windows. It is quite strange it worked before, I can't explain it. 
--- src/utils/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/file.c b/src/utils/file.c index fc97ab810..9f8301c56 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -156,7 +156,7 @@ remove_file_or_dir(const char* path) { int rc = remove(path); - if (rc < 0 && errno == EACCESS) + if (rc < 0 && errno == EACCES) rc = rmdir(path); return rc; } From 4ec7b9c08828d9b90206e8f65cdb8854f18d55b8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 14 Oct 2022 11:22:18 +0300 Subject: [PATCH 056/339] [PBCKP-146] - fio_get_crc32 - add "missing_ok" parameter --- src/data.c | 10 +++++++--- src/utils/file.c | 28 ++++++++++++++++++---------- src/utils/file.h | 4 +++- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/src/data.c b/src/data.c index 17ae4b91a..36587a139 100644 --- a/src/data.c +++ b/src/data.c @@ -801,8 +801,11 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, (prev_file && file->exists_in_prev && file->mtime <= parent_backup_time)) { - - file->crc = fio_get_crc32(FIO_DB_HOST, from_fullpath, false); + /* + * file could be deleted under our feets. + * But then backup_non_data_file_internal will handle it safely + */ + file->crc = fio_get_crc32(FIO_DB_HOST, from_fullpath, false, true); /* ...and checksum is the same... 
*/ if (EQ_TRADITIONAL_CRC32(file->crc, prev_file->crc)) @@ -1327,7 +1330,8 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, if (already_exists) { /* compare checksums of already existing file and backup file */ - pg_crc32 file_crc = fio_get_crc32(FIO_DB_HOST, to_fullpath, false); + pg_crc32 file_crc = fio_get_crc32(FIO_DB_HOST, to_fullpath, false, + false); if (file_crc == tmp_file->crc) { diff --git a/src/utils/file.c b/src/utils/file.c index 86977a19a..92ed1c725 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1402,9 +1402,15 @@ fio_sync(fio_location location, const char* path) } } +enum { + GET_CRC32_DECOMPRESS = 1, + GET_CRC32_MISSING_OK = 2 +}; + /* Get crc32 of file */ pg_crc32 -fio_get_crc32(fio_location location, const char *file_path, bool decompress) +fio_get_crc32(fio_location location, const char *file_path, + bool decompress, bool missing_ok) { if (fio_is_remote(location)) { @@ -1417,7 +1423,9 @@ fio_get_crc32(fio_location location, const char *file_path, bool decompress) hdr.arg = 0; if (decompress) - hdr.arg = 1; + hdr.arg = GET_CRC32_DECOMPRESS; + if (missing_ok) + hdr.arg |= GET_CRC32_MISSING_OK; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, file_path, path_len), path_len); @@ -1428,9 +1436,9 @@ fio_get_crc32(fio_location location, const char *file_path, bool decompress) else { if (decompress) - return pgFileGetCRCgz(file_path, true, true); + return pgFileGetCRCgz(file_path, true, missing_ok); else - return pgFileGetCRC(file_path, true, true); + return pgFileGetCRC(file_path, true, missing_ok); } } @@ -3365,10 +3373,10 @@ fio_communicate(int in, int out) break; case FIO_GET_CRC32: /* calculate crc32 for a file */ - if (hdr.arg == 1) - crc = pgFileGetCRCgz(buf, true, true); + if ((hdr.arg & GET_CRC32_DECOMPRESS)) + crc = pgFileGetCRCgz(buf, true, (hdr.arg & GET_CRC32_MISSING_OK) != 0); else - crc = pgFileGetCRC(buf, true, true); + crc = pgFileGetCRC(buf, true, 
(hdr.arg & GET_CRC32_MISSING_OK) != 0); IO_CHECK(fio_write_all(out, &crc, sizeof(crc)), sizeof(crc)); break; case FIO_GET_CHECKSUM_MAP: @@ -3606,9 +3614,9 @@ pioLocalDrive_pioGetCRC32(VSelf, path_t path, bool compressed, err_i *err) elog(VERBOSE, "Local Drive calculate crc32 for '%s', compressed=%d", path, compressed); if (compressed) - return pgFileGetCRCgz(path, true, true); + return pgFileGetCRCgz(path, true, false); else - return pgFileGetCRC(path, true, true); + return pgFileGetCRC(path, true, false); } static bool @@ -3867,7 +3875,7 @@ pioRemoteDrive_pioGetCRC32(VSelf, path_t path, bool compressed, err_i *err) hdr.arg = 0; if (compressed) - hdr.arg = 1; + hdr.arg = GET_CRC32_DECOMPRESS; elog(VERBOSE, "Remote Drive calculate crc32 for '%s', hdr.arg=%d", path, compressed); diff --git a/src/utils/file.h b/src/utils/file.h index 7fd1e7919..c1c42672d 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -161,7 +161,9 @@ extern int fio_closedir(DIR *dirp); /* pathname-style functions */ extern int fio_sync(fio_location location, const char* path); -extern pg_crc32 fio_get_crc32(fio_location location, const char *file_path, bool decompress); +extern pg_crc32 +fio_get_crc32(fio_location location, const char *file_path, + bool decompress, bool missing_ok); extern int fio_rename(fio_location location, const char* old_path, const char* new_path); extern int fio_symlink(fio_location location, const char* target, const char* link_path, bool overwrite); From cbc37b22c6595e2ce8922acf3a68210f66667816 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 5 Oct 2022 12:16:10 +0300 Subject: [PATCH 057/339] [PBCKP-146] prettify forkname handling. 
--- src/catalog.c | 5 ++- src/dir.c | 87 ++++++++++++++++++++++------------------------ src/pg_probackup.h | 2 ++ 3 files changed, 48 insertions(+), 46 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index b4be159d1..6aa26c6b3 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1138,6 +1138,9 @@ get_backup_filelist(pgBackup *backup, bool strict) if (get_control_value_int64(buf, "hdr_size", &hdr_size, false)) file->hdr_size = (int) hdr_size; + if (file->external_dir_num == 0) + set_forkname(file); + parray_append(files, file); } @@ -2516,7 +2519,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, char control_path[MAXPGPATH]; char control_path_temp[MAXPGPATH]; size_t i = 0; - #define BUFFERSZ 1024*1024 + #define BUFFERSZ (1024*1024) char *buf; int64 backup_size_on_disk = 0; int64 uncompressed_size_on_disk = 0; diff --git a/src/dir.c b/src/dir.c index 0bcd60169..e0bf039c6 100644 --- a/src/dir.c +++ b/src/dir.c @@ -680,57 +680,22 @@ dir_check_file(pgFile *file, bool backup_logs) return CHECK_FALSE; else if (isdigit(file->name[0])) { - char *fork_name; - int len; - char suffix[MAXPGPATH]; + set_forkname(file); - fork_name = strstr(file->name, "_"); - if (fork_name) - { - /* Auxiliary fork of the relfile */ - if (strcmp(fork_name, "_vm") == 0) - file->forkName = vm; - - else if (strcmp(fork_name, "_fsm") == 0) - file->forkName = fsm; - - else if (strcmp(fork_name, "_cfm") == 0) - file->forkName = cfm; - - else if (strcmp(fork_name, "_ptrack") == 0) - file->forkName = ptrack; - - else if (strcmp(fork_name, "_init") == 0) - file->forkName = init; - - // extract relOid for certain forks - if (file->forkName == vm || - file->forkName == fsm || - file->forkName == init || - file->forkName == cfm) - { - // sanity - if (sscanf(file->name, "%u_*", &(file->relOid)) != 1) - file->relOid = 0; - } + if (file->forkName == ptrack) /* Compatibility with left-overs from ptrack1 */ + return CHECK_FALSE; + else if (file->forkName != none) + return 
CHECK_TRUE; - /* Do not backup ptrack files */ - if (file->forkName == ptrack) - return CHECK_FALSE; - } - else + /* Set is_datafile flag */ { + char suffix[MAXFNAMELEN]; - len = strlen(file->name); - /* reloid.cfm */ - if (len > 3 && strcmp(file->name + len - 3, "cfm") == 0) - return CHECK_TRUE; - + /* check if file is datafile */ sscanf_res = sscanf(file->name, "%u.%d.%s", &(file->relOid), &(file->segno), suffix); - if (sscanf_res == 0) - elog(ERROR, "Cannot parse file name \"%s\"", file->name); - else if (sscanf_res == 1 || sscanf_res == 2) + Assert(sscanf_res > 0); /* since first char is digit */ + if (sscanf_res == 1 || sscanf_res == 2) file->is_datafile = true; } } @@ -1869,3 +1834,35 @@ pfilearray_clear_locks(parray *file_list) pg_atomic_clear_flag(&file->lock); } } + +/* Set forkName if possible */ +void +set_forkname(pgFile *file) +{ + int name_len = strlen(file->name); + + /* Auxiliary fork of the relfile */ + if (name_len > 3 && strcmp(file->name + name_len - 3, "_vm") == 0) + file->forkName = vm; + + else if (name_len > 4 && strcmp(file->name + name_len - 4, "_fsm") == 0) + file->forkName = fsm; + + else if (name_len > 4 && strcmp(file->name + name_len - 4, ".cfm") == 0) + file->forkName = cfm; + + else if (name_len > 5 && strcmp(file->name + name_len - 5, "_init") == 0) + file->forkName = init; + + else if (name_len > 7 && strcmp(file->name + name_len - 7, "_ptrack") == 0) + file->forkName = ptrack; + + // extract relOid for certain forks + + if ((file->forkName == vm || + file->forkName == fsm || + file->forkName == init || + file->forkName == cfm) && + (sscanf(file->name, "%u*", &(file->relOid)) != 1)) + file->relOid = 0; +} diff --git a/src/pg_probackup.h b/src/pg_probackup.h index d51fa6c17..1b625cc65 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -200,6 +200,7 @@ typedef enum CompressAlg typedef enum ForkName { + none, vm, fsm, cfm, @@ -1061,6 +1062,7 @@ extern int pgCompareString(const void *str1, const void *str2); extern int 
pgPrefixCompareString(const void *str1, const void *str2); extern int pgCompareOid(const void *f1, const void *f2); extern void pfilearray_clear_locks(parray *file_list); +extern void set_forkname(pgFile *file); /* in data.c */ extern bool check_data_file(ConnectionArgs *arguments, pgFile *file, From 646baf78fa82116c8183299f7213e5af4f75a09f Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 10 Oct 2022 17:07:41 +0300 Subject: [PATCH 058/339] [PBCKP-146] stabilize couple of tests. --- tests/cfs_backup.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/cfs_backup.py b/tests/cfs_backup.py index d820360fe..436db31e7 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup.py @@ -995,6 +995,11 @@ def test_delete_random_cfm_file_from_tablespace_dir(self): "FROM generate_series(0,256) i".format('t1', tblspace_name) ) + self.node.safe_psql( + "postgres", + "CHECKPOINT" + ) + list_cmf = find_by_extensions( [self.get_tblspace_path(self.node, tblspace_name)], ['.cfm']) @@ -1044,6 +1049,11 @@ def test_delete_random_data_file_from_tablespace_dir(self): "FROM generate_series(0,256) i".format('t1', tblspace_name) ) + self.node.safe_psql( + "postgres", + "CHECKPOINT" + ) + list_data_files = find_by_pattern( [self.get_tblspace_path(self.node, tblspace_name)], '^.*/\d+$') From 7aa7d7ab3edfa464a9de6b4a9a6d673ec3bf34d8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 11 Oct 2022 18:59:48 +0300 Subject: [PATCH 059/339] [PBCKP-146] fix cfs test python3 compatibility --- tests/cfs_restore.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/cfs_restore.py b/tests/cfs_restore.py index 07cf891aa..611afc49e 100644 --- a/tests/cfs_restore.py +++ b/tests/cfs_restore.py @@ -103,6 +103,7 @@ def test_restore_empty_tablespace_from_fullbackup(self): "postgres", "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format(tblspace_name) ) + tblspace = str(tblspace) self.assertTrue( tblspace_name in tblspace and "compression=true" in tblspace, "ERROR: The tablespace 
not restored or it restored without compressions" From 9c20466fa0ea826b3e3e1b68a74576c515d4ab5a Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Thu, 8 Sep 2022 09:15:24 +0300 Subject: [PATCH 060/339] [PBCKP-263] fix for tests.archive.ArchiveTest.test_archive_get_batching_sanity (#532) --- src/archive.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/archive.c b/src/archive.c index 0ebe5e504..f5a9ad114 100644 --- a/src/archive.c +++ b/src/archive.c @@ -969,7 +969,7 @@ uint32 run_wal_prefetch(const char *prefetch_dir, const char *archive_dir, /* init thread args */ threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); - threads_args = (archive_get_arg *) palloc(sizeof(archive_get_arg) * num_threads); + threads_args = (archive_get_arg *) palloc0(sizeof(archive_get_arg) * num_threads); for (i = 0; i < num_threads; i++) { @@ -980,6 +980,7 @@ uint32 run_wal_prefetch(const char *prefetch_dir, const char *archive_dir, arg->thread_num = i+1; arg->files = batch_files; + arg->n_fetched = 0; } /* Run threads */ From d74937200ef6b12bac9554578a910b0a62b8db41 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 14 Oct 2022 17:36:32 +0300 Subject: [PATCH 061/339] [PBCKP-258] fix multiple permission tests --- tests/backup.py | 4 ++-- tests/checkdb.py | 9 ++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index 3dfc5c5e1..c720cb9d2 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1902,10 +1902,10 @@ def test_backup_with_least_privileges_role(self): "GRANT EXECUTE ON FUNCTION ptrack.ptrack_init_lsn() TO backup;") if ProbackupTest.enterprise: - node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup;" + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") # FULL backup self.backup_node( diff --git a/tests/checkdb.py 
b/tests/checkdb.py index c5465efca..d01ced960 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -669,7 +669,6 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup;' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' ) if ProbackupTest.enterprise: # amcheck-1.1 @@ -710,7 +709,6 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' ) # checkunique parameter if ProbackupTest.enterprise: @@ -749,7 +747,6 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anycompatiblearray, anycompatible) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; ' ) # checkunique parameter if ProbackupTest.enterprise: @@ -757,6 +754,12 @@ def test_checkdb_with_least_privileges(self): "backupdb", "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") + if ProbackupTest.enterprise: + node.safe_psql( + "backupdb", + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup;" + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") + # checkdb try: self.checkdb_node( From d26a68fbb7cccf8bc8c1cc193f33b2ede4306ce5 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Thu, 1 Sep 2022 14:38:17 +0300 Subject: [PATCH 062/339] =?UTF-8?q?[PBCKP-259]=20fix=20for=20'ERROR:=20Can?= =?UTF-8?q?not=20create=20directory=20for=20older=20backup'=E2=80=A6=20(#5?= =?UTF-8?q?26)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [PBCKP-259] fix for 'ERROR: Cannot create directory for older backup', rewrite --start_time implementation * rewritten 5f2283c8deac88ea49ea6223a3aa72e2cf462eb5 * fixes for several tests * disabled tests.merge.MergeTest.test_merge_backup_from_future and tests.restore.RestoreTest.test_restore_backup_from_future as incorrect for now Co-authored-by: d.lepikhova --- src/backup.c | 60 +++++- src/catalog.c | 97 ++++----- src/pg_probackup.c | 8 +- src/pg_probackup.h | 7 +- tests/backup.py | 364 ++++++++------------------------ tests/helpers/ptrack_helpers.py | 25 ++- tests/merge.py | 6 +- tests/restore.py | 6 +- 8 files changed, 217 insertions(+), 356 deletions(-) diff --git a/src/backup.c b/src/backup.c index 15f1a4d1c..890b040e7 100644 --- a/src/backup.c +++ b/src/backup.c @@ -685,6 +685,8 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo) /* * Entry point of pg_probackup BACKUP subcommand. 
+ * + * if start_time == INVALID_BACKUP_ID then we can generate backup_id */ int do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, @@ -692,8 +694,13 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, { PGconn *backup_conn = NULL; PGNodeInfo nodeInfo; + time_t latest_backup_id = INVALID_BACKUP_ID; char pretty_bytes[20]; + if (!instance_config.pgdata) + elog(ERROR, "required parameter not specified: PGDATA " + "(-D, --pgdata)"); + /* Initialize PGInfonode */ pgNodeInit(&nodeInfo); @@ -702,12 +709,55 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, (pg_strcasecmp(instance_config.external_dir_str, "none") != 0)) current.external_dir_str = instance_config.external_dir_str; - /* Create backup directory and BACKUP_CONTROL_FILE */ - pgBackupCreateDir(¤t, instanceState, start_time); + /* Find latest backup_id */ + { + parray *backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); - if (!instance_config.pgdata) - elog(ERROR, "required parameter not specified: PGDATA " - "(-D, --pgdata)"); + if (parray_num(backup_list) > 0) + latest_backup_id = ((pgBackup *)parray_get(backup_list, 0))->backup_id; + + parray_walk(backup_list, pgBackupFree); + parray_free(backup_list); + } + + /* Try to pick backup_id and create backup directory with BACKUP_CONTROL_FILE */ + if (start_time != INVALID_BACKUP_ID) + { + /* If user already choosed backup_id for us, then try to use it. 
*/ + if (start_time <= latest_backup_id) + /* don't care about freeing base36enc_dup memory, we exit anyway */ + elog(ERROR, "Can't assign backup_id from requested start_time (%s), " + "this time must be later that backup %s", + base36enc_dup(start_time), base36enc_dup(latest_backup_id)); + + current.backup_id = start_time; + pgBackupInitDir(¤t, instanceState->instance_backup_subdir_path); + } + else + { + /* We can generate our own unique backup_id + * Sometimes (when we try to backup twice in one second) + * backup_id will be duplicated -> try more times. + */ + int attempts = 10; + + if (time(NULL) < latest_backup_id) + elog(ERROR, "Can't assign backup_id, there is already a backup in future (%s)", + base36enc(latest_backup_id)); + + do + { + current.backup_id = time(NULL); + pgBackupInitDir(¤t, instanceState->instance_backup_subdir_path); + if (current.backup_id == INVALID_BACKUP_ID) + sleep(1); + } + while (current.backup_id == INVALID_BACKUP_ID && attempts-- > 0); + } + + /* If creation of backup dir was unsuccessful, there will be WARNINGS in logs already */ + if (current.backup_id == INVALID_BACKUP_ID) + elog(ERROR, "Can't create backup directory"); /* Update backup status and other metainfo. 
*/ current.status = BACKUP_STATUS_RUNNING; diff --git a/src/catalog.c b/src/catalog.c index 6aa26c6b3..e57ffc056 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -23,7 +23,7 @@ static pgBackup* get_closest_backup(timelineInfo *tlinfo); static pgBackup* get_oldest_backup(timelineInfo *tlinfo); static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"}; static pgBackup *readBackupControlFile(const char *path); -static void create_backup_dir(pgBackup *backup, const char *backup_instance_path); +static int create_backup_dir(pgBackup *backup, const char *backup_instance_path); static bool backup_lock_exit_hook_registered = false; static parray *locks = NULL; @@ -976,6 +976,7 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id } else if (strcmp(base36enc(backup->start_time), data_ent->d_name) != 0) { + /* TODO there is no such guarantees */ elog(WARNING, "backup ID in control file \"%s\" doesn't match name of the backup folder \"%s\"", base36enc(backup->start_time), backup_conf_path); } @@ -1421,21 +1422,33 @@ get_multi_timeline_parent(parray *backup_list, parray *tli_list, return NULL; } -/* Create backup directory in $BACKUP_PATH - * Note, that backup_id attribute is updated, - * so it is possible to get diffrent values in +/* + * Create backup directory in $BACKUP_PATH + * (with proposed backup->backup_id) + * and initialize this directory. + * If creation of directory fails, then + * backup_id will be cleared (set to INVALID_BACKUP_ID). + * It is possible to get diffrent values in * pgBackup.start_time and pgBackup.backup_id. * It may be ok or maybe not, so it's up to the caller * to fix it or let it be. 
*/ void -pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_time) +pgBackupInitDir(pgBackup *backup, const char *backup_instance_path) { - int i; - parray *subdirs = parray_new(); - parray * backups; - pgBackup *target_backup; + int i; + char temp[MAXPGPATH]; + parray *subdirs; + /* Try to create backup directory at first */ + if (create_backup_dir(backup, backup_instance_path) != 0) + { + /* Clear backup_id as indication of error */ + backup->backup_id = INVALID_BACKUP_ID; + return; + } + + subdirs = parray_new(); parray_append(subdirs, pg_strdup(DATABASE_DIR)); /* Add external dirs containers */ @@ -1447,7 +1460,6 @@ pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_t false); for (i = 0; i < parray_num(external_list); i++) { - char temp[MAXPGPATH]; /* Numeration of externaldirs starts with 1 */ makeExternalDirPathByNum(temp, EXTERNAL_DIR, i+1); parray_append(subdirs, pg_strdup(temp)); @@ -1455,30 +1467,6 @@ pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_t free_dir_list(external_list); } - /* Get list of all backups*/ - backups = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); - if (parray_num(backups) > 0) - { - target_backup = (pgBackup *) parray_get(backups, 0); - if (start_time > target_backup->backup_id) - { - backup->backup_id = start_time; - create_backup_dir(backup, instanceState->instance_backup_subdir_path); - } - else - { - elog(ERROR, "Cannot create directory for older backup"); - } - } - else - { - backup->backup_id = start_time; - create_backup_dir(backup, instanceState->instance_backup_subdir_path); - } - - if (backup->backup_id == 0) - elog(ERROR, "Cannot create backup directory: %s", strerror(errno)); - backup->database_dir = pgut_malloc(MAXPGPATH); join_path_components(backup->database_dir, backup->root_dir, DATABASE_DIR); @@ -1488,10 +1476,8 @@ pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_t /* create directories 
for actual backup files */ for (i = 0; i < parray_num(subdirs); i++) { - char path[MAXPGPATH]; - - join_path_components(path, backup->root_dir, parray_get(subdirs, i)); - fio_mkdir(FIO_BACKUP_HOST, path, DIR_PERMISSION, false); + join_path_components(temp, backup->root_dir, parray_get(subdirs, i)); + fio_mkdir(FIO_BACKUP_HOST, temp, DIR_PERMISSION, false); } free_dir_list(subdirs); @@ -1500,33 +1486,26 @@ pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_t /* * Create root directory for backup, * update pgBackup.root_dir if directory creation was a success + * Return values (same as dir_create_dir()): + * 0 - ok + * -1 - error (warning message already emitted) */ -void +int create_backup_dir(pgBackup *backup, const char *backup_instance_path) { - int attempts = 10; + int rc; + char path[MAXPGPATH]; - while (attempts--) - { - int rc; - char path[MAXPGPATH]; - - join_path_components(path, backup_instance_path, base36enc(backup->backup_id)); + join_path_components(path, backup_instance_path, base36enc(backup->backup_id)); - rc = fio_mkdir(FIO_BACKUP_HOST, path, DIR_PERMISSION, true); - - if (rc == 0) - { - backup->root_dir = pgut_strdup(path); - return; - } - else - { - elog(WARNING, "Cannot create directory \"%s\": %s", path, strerror(errno)); - sleep(1); - } - } + /* TODO: add wrapper for remote mode */ + rc = fio_mkdir(FIO_BACKUP_HOST, path, DIR_PERMISSION, true); + if (rc == 0) + backup->root_dir = pgut_strdup(path); + else + elog(WARNING, "Cannot create directory \"%s\": %s", path, strerror(errno)); + return rc; } /* diff --git a/src/pg_probackup.c b/src/pg_probackup.c index abddf7fc1..e09b27872 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -78,7 +78,7 @@ pid_t my_pid = 0; __thread int my_thread_num = 1; bool progress = false; bool no_sync = false; -time_t start_time = 0; +time_t start_time = INVALID_BACKUP_ID; char *replication_slot = NULL; bool temp_slot = false; bool perm_slot = false; @@ -200,7 +200,6 @@ static 
ConfigOption cmd_options[] = { 's', 'i', "backup-id", &backup_id_string, SOURCE_CMD_STRICT }, { 'b', 133, "no-sync", &no_sync, SOURCE_CMD_STRICT }, { 'b', 134, "no-color", &no_color, SOURCE_CMD_STRICT }, - { 'U', 241, "start-time", &start_time, SOURCE_CMD_STRICT }, /* backup options */ { 'b', 180, "backup-pg-log", &backup_logs, SOURCE_CMD_STRICT }, { 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT }, @@ -213,6 +212,7 @@ static ConfigOption cmd_options[] = { 'b', 184, "merge-expired", &merge_expired, SOURCE_CMD_STRICT }, { 'b', 185, "dry-run", &dry_run, SOURCE_CMD_STRICT }, { 's', 238, "note", &backup_note, SOURCE_CMD_STRICT }, + { 'U', 241, "start-time", &start_time, SOURCE_CMD_STRICT }, /* catchup options */ { 's', 239, "source-pgdata", &catchup_source_pgdata, SOURCE_CMD_STRICT }, { 's', 240, "destination-pgdata", &catchup_destination_pgdata, SOURCE_CMD_STRICT }, @@ -975,9 +975,7 @@ main(int argc, char *argv[]) case BACKUP_CMD: { current.stream = stream_wal; - if (start_time == 0) - start_time = current_time; - else + if (start_time != INVALID_BACKUP_ID) elog(WARNING, "Please do not use the --start-time option to start backup. " "This is a service option required to work with other extensions. " "We do not guarantee future support for this flag."); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 1b625cc65..876d3aea1 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -439,7 +439,10 @@ struct pgBackup { BackupMode backup_mode; /* Mode - one of BACKUP_MODE_xxx above*/ time_t backup_id; /* Identifier of the backup. - * Currently it's the same as start_time */ + * By default it's the same as start_time + * but can be increased if same backup_id + * already exists. 
It can be also set by + * start_time parameter */ BackupStatus status; /* Status - one of BACKUP_STATUS_xxx above*/ TimeLineID tli; /* timeline of start and stop backup lsns */ XLogRecPtr start_lsn; /* backup's starting transaction log location */ @@ -969,7 +972,7 @@ extern void write_backup_filelist(pgBackup *backup, parray *files, const char *root, parray *external_list, bool sync); -extern void pgBackupCreateDir(pgBackup *backup, InstanceState *instanceState, time_t start_time); +extern void pgBackupInitDir(pgBackup *backup, const char *backup_instance_path); extern void pgNodeInit(PGNodeInfo *node); extern void pgBackupInit(pgBackup *backup); extern void pgBackupFree(void *backup); diff --git a/tests/backup.py b/tests/backup.py index c720cb9d2..1fc4b86c3 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1,7 +1,7 @@ import unittest import os from time import sleep, time -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException import shutil from distutils.dir_util import copy_tree from testgres import ProcessType, QueryException @@ -313,7 +313,7 @@ def test_backup_detect_corruption(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if self.ptrack and node.major_version > 11: + if self.ptrack: node.safe_psql( "postgres", "create extension ptrack") @@ -459,7 +459,7 @@ def test_backup_detect_invalid_block_header(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if self.ptrack and node.major_version > 11: + if self.ptrack: node.safe_psql( "postgres", "create extension ptrack") @@ -600,7 +600,7 @@ def test_backup_detect_missing_permissions(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() - if self.ptrack and node.major_version > 11: + if self.ptrack: node.safe_psql( "postgres", "create extension ptrack") @@ -3199,10 +3199,11 @@ def test_pg_stop_backup_missing_permissions(self): # @unittest.skip("skip") def 
test_start_time(self): - + """Test, that option --start-time allows to set backup_id and restore""" fname = self.id().split('.')[3] node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3215,138 +3216,81 @@ def test_start_time(self): # FULL backup startTime = int(time()) self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - - # DELTA backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=['--stream', '--start-time', str(startTime)]) - - # PAGE backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type="page", - options=['--stream', '--start-time', str(startTime)]) - - if self.ptrack and node.major_version > 11: - node.safe_psql( - "postgres", - "create extension ptrack") - - # PTRACK backup - startTime = int(time()) - self.backup_node( - backup_dir, 'node', node, backup_type="ptrack", - options=['--stream', '--start-time', str(startTime)]) - - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") - def test_start_time_incorrect_time(self): - - fname = self.id().split('.')[3] - node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving(backup_dir, 'node', node) - node.slow_start() + backup_dir, 'node', node, backup_type='full', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore FULL backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_full'), + 
backup_id=base36enc(startTime)) - startTime = int(time()) - #backup with correct start time - self.backup_node( - backup_dir, 'node', node, - options=['--stream', '--start-time', str(startTime)]) - #backups with incorrect start time + #FULL backup with incorrect start time try: + startTime = str(int(time()-100000)) self.backup_node( - backup_dir, 'node', node, backup_type="full", - options=['--stream', '--start-time', str(startTime-10000)]) + backup_dir, 'node', node, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) # we should die here because exception is what we expect to happen self.assertEqual( 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( + 'Expecting Error because start time for new backup must be newer ' + '\n Output: {0} \n CMD: {1}'.format( repr(self.output), self.cmd)) except ProbackupException as e: self.assertRegex( e.message, - "ERROR: Cannot create directory for older backup", + r"ERROR: Can't assign backup_id from requested start_time \(\w*\), this time must be later that backup \w*\n", "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - try: - self.backup_node( - backup_dir, 'node', node, backup_type="delta", - options=['--stream', '--start-time', str(startTime-10000)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + # DELTA backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore DELTA backup by backup_id 
calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_delta'), + backup_id=base36enc(startTime)) - try: - self.backup_node( - backup_dir, 'node', node, backup_type="page", - options=['--stream', '--start-time', str(startTime-10000)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + # PAGE backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore PAGE backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_page'), + backup_id=base36enc(startTime)) - if self.ptrack and node.major_version > 11: + # PTRACK backup + if self.ptrack: node.safe_psql( - "postgres", - "create extension ptrack") + 'postgres', + 'create extension ptrack') - try: - self.backup_node( - backup_dir, 'node', node, backup_type="page", - options=['--stream', '--start-time', str(startTime-10000)]) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + startTime = int(time()) + self.backup_node( + backup_dir, 'node', 
node, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore PTRACK backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, module_name, fname, 'node_restored_ptrack'), + backup_id=base36enc(startTime)) # Clean after yourself self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_start_time_few_nodes(self): - + """Test, that we can synchronize backup_id's for different DBs""" fname = self.id().split('.')[3] node1 = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node1'), + set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3358,6 +3302,7 @@ def test_start_time_few_nodes(self): node2 = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node2'), + set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3368,200 +3313,61 @@ def test_start_time_few_nodes(self): node2.slow_start() # FULL backup - startTime = int(time()) + startTime = str(int(time())) self.backup_node( - backup_dir1, 'node1', node1, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) + backup_dir1, 'node1', node1, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) self.backup_node( - backup_dir2, 'node2', node2, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - + backup_dir2, 'node2', node2, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) show_backup1 = self.show_pb(backup_dir1, 'node1')[0] show_backup2 = self.show_pb(backup_dir2, 'node2')[0] self.assertEqual(show_backup1['id'], show_backup2['id']) # DELTA backup - startTime = int(time()) + startTime = str(int(time())) self.backup_node( - backup_dir1, 'node1', node1, backup_type="delta", - options=['--stream', '--start-time', str(startTime)]) + backup_dir1, 'node1', node1, backup_type='delta', + 
options=['--stream', '--start-time={0}'.format(startTime)]) self.backup_node( - backup_dir2, 'node2', node2, backup_type="delta", - options=['--stream', '--start-time', str(startTime)]) + backup_dir2, 'node2', node2, backup_type='delta', + options=['--stream', '--start-time={0}'.format(startTime)]) show_backup1 = self.show_pb(backup_dir1, 'node1')[1] show_backup2 = self.show_pb(backup_dir2, 'node2')[1] self.assertEqual(show_backup1['id'], show_backup2['id']) # PAGE backup - startTime = int(time()) + startTime = str(int(time())) self.backup_node( - backup_dir1, 'node1', node1, backup_type="page", - options=['--stream', '--start-time', str(startTime)]) + backup_dir1, 'node1', node1, backup_type='page', + options=['--stream', '--start-time={0}'.format(startTime)]) self.backup_node( - backup_dir2, 'node2', node2, backup_type="page", - options=['--stream', '--start-time', str(startTime)]) + backup_dir2, 'node2', node2, backup_type='page', + options=['--stream', '--start-time={0}'.format(startTime)]) show_backup1 = self.show_pb(backup_dir1, 'node1')[2] show_backup2 = self.show_pb(backup_dir2, 'node2')[2] self.assertEqual(show_backup1['id'], show_backup2['id']) # PTRACK backup - startTime = int(time()) - if self.ptrack and node1.major_version > 11: + if self.ptrack: node1.safe_psql( - "postgres", - "create extension ptrack") - self.backup_node( - backup_dir1, 'node1', node1, backup_type="ptrack", - options=['--stream', '--start-time', str(startTime)]) - - if self.ptrack and node2.major_version > 11: + 'postgres', + 'create extension ptrack') node2.safe_psql( - "postgres", - "create extension ptrack") - self.backup_node( - backup_dir2, 'node2', node2, backup_type="ptrack", - options=['--stream', '--start-time', str(startTime)]) - show_backup1 = self.show_pb(backup_dir1, 'node1')[3] - show_backup2 = self.show_pb(backup_dir2, 'node2')[3] - self.assertEqual(show_backup1['id'], show_backup2['id']) - - # Clean after yourself - self.del_test_dir(module_name, fname) - - # 
@unittest.skip("skip") - def test_start_time_few_nodes_incorrect_time(self): - - fname = self.id().split('.')[3] - node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir1 = os.path.join(self.tmp_path, module_name, fname, 'backup1') - self.init_pb(backup_dir1) - self.add_instance(backup_dir1, 'node1', node1) - self.set_archiving(backup_dir1, 'node1', node1) - node1.slow_start() - - node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2'), - ptrack_enable=self.ptrack, - initdb_params=['--data-checksums']) - - backup_dir2 = os.path.join(self.tmp_path, module_name, fname, 'backup2') - self.init_pb(backup_dir2) - self.add_instance(backup_dir2, 'node2', node2) - self.set_archiving(backup_dir2, 'node2', node2) - node2.slow_start() - - # FULL backup - startTime = int(time()) - self.backup_node( - backup_dir1, 'node1', node1, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type="full", - options=['--stream', '--start-time', str(startTime-10000)]) - - show_backup1 = self.show_pb(backup_dir1, 'node1')[0] - show_backup2 = self.show_pb(backup_dir2, 'node2')[0] - self.assertGreater(show_backup1['id'], show_backup2['id']) - - # DELTA backup - startTime = int(time()) - self.backup_node( - backup_dir1, 'node1', node1, backup_type="delta", - options=['--stream', '--start-time', str(startTime)]) - # make backup with start time definitelly earlier, than existing - try: - self.backup_node( - backup_dir2, 'node2', node2, backup_type="delta", - options=['--stream', '--start-time', str(10000)]) - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older 
backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - show_backup1 = self.show_pb(backup_dir1, 'node1')[1] - show_backup2 = self.show_pb(backup_dir2, 'node2')[0] - self.assertGreater(show_backup1['id'], show_backup2['id']) + 'postgres', + 'create extension ptrack') - # PAGE backup - startTime = int(time()) - self.backup_node( - backup_dir1, 'node1', node1, backup_type="page", - options=['--stream', '--start-time', str(startTime)]) - # make backup with start time definitelly earlier, than existing - try: + startTime = str(int(time())) self.backup_node( - backup_dir2, 'node2', node2, backup_type="page", - options=['--stream', '--start-time', str(10000)]) - self.assertEqual( - 1, 0, - "Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - show_backup1 = self.show_pb(backup_dir1, 'node1')[2] - show_backup2 = self.show_pb(backup_dir2, 'node2')[0] - self.assertGreater(show_backup1['id'], show_backup2['id']) - - # PTRACK backup - startTime = int(time()) - if self.ptrack and node1.major_version > 11: - node1.safe_psql( - "postgres", - "create extension ptrack") + backup_dir1, 'node1', node1, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(startTime)]) self.backup_node( - backup_dir1, 'node1', node1, backup_type="ptrack", - options=['--stream', '--start-time', str(startTime)]) - - if self.ptrack and node2.major_version > 11: - node2.safe_psql( - "postgres", - "create extension ptrack") - # make backup with start time definitelly earlier, than existing - try: - self.backup_node( - backup_dir2, 'node2', node2, backup_type="ptrack", - options=['--stream', '--start-time', str(10000)]) - self.assertEqual( - 1, 0, - 
"Expecting Error because start time for new backup must be newer " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertRegex( - e.message, - "ERROR: Cannot create directory for older backup", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # FULL backup - startTime = int(time()) - self.backup_node( - backup_dir1, 'node1', node1, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - self.backup_node( - backup_dir2, 'node2', node2, backup_type="full", - options=['--stream', '--start-time', str(startTime)]) - - show_backup1 = self.show_pb(backup_dir1, 'node1')[4] - show_backup2 = self.show_pb(backup_dir2, 'node2')[1] - self.assertEqual(show_backup1['id'], show_backup2['id']) + backup_dir2, 'node2', node2, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[3] + show_backup2 = self.show_pb(backup_dir2, 'node2')[3] + self.assertEqual(show_backup1['id'], show_backup2['id']) # Clean after yourself self.del_test_dir(module_name, fname) + diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index e7390d6b1..2401261d6 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -110,6 +110,26 @@ def is_nls_enabled(): return b'enable-nls' in p.communicate()[0] +def base36enc(number): + """Converts an integer to a base36 string.""" + alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' + base36 = '' + sign = '' + + if number < 0: + sign = '-' + number = -number + + if 0 <= number < len(alphabet): + return sign + alphabet[number] + + while number != 0: + number, i = divmod(number, len(alphabet)) + base36 = alphabet[i] + base36 + + return sign + base36 + + class ProbackupException(Exception): def __init__(self, message, cmd): self.message = message @@ -910,7 +930,7 @@ def backup_node( backup_type='full', datname=False, 
options=[], asynchronous=False, gdb=False, old_binary=False, return_id=True, no_remote=False, - env=None, startTime=None + env=None ): if not node and not data_dir: print('You must provide ether node or data_dir for backup') @@ -943,9 +963,6 @@ def backup_node( if not old_binary: cmd_list += ['--no-sync'] - if startTime: - cmd_list += ['--start-time', startTime] - return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary, return_id, env=env) def checkdb_node( diff --git a/tests/merge.py b/tests/merge.py index 72b3a6089..148e73b5b 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -1967,7 +1967,11 @@ def test_failed_merge_after_delete_3(self): self.del_test_dir(module_name, fname) - # @unittest.skip("skip") + # Skipped, because backups from the future are invalid. + # This cause a "ERROR: Can't assign backup_id, there is already a backup in future" + # now (PBCKP-259). We can conduct such a test again when we + # untie 'backup_id' from 'start_time' + @unittest.skip("skip") def test_merge_backup_from_future(self): """ take FULL backup, table PAGE backup from future, diff --git a/tests/restore.py b/tests/restore.py index d6246b3e2..3f50ae71e 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -1845,7 +1845,11 @@ def test_restore_chain_with_corrupted_backup(self): # Clean after yourself self.del_test_dir(module_name, fname) - # @unittest.skip("skip") + # Skipped, because backups from the future are invalid. + # This cause a "ERROR: Can't assign backup_id, there is already a backup in future" + # now (PBCKP-259). 
We can conduct such a test again when we + # untie 'backup_id' from 'start_time' + @unittest.skip("skip") def test_restore_backup_from_future(self): """more complex test_restore_chain()""" fname = self.id().split('.')[3] From cba77dd77c78b599df538eda0a38a33a798270aa Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 15 Oct 2022 13:31:47 +0300 Subject: [PATCH 063/339] [PBCKP-235] Pg15 compatibility Author: Daniel Shelepanov --- .travis.yml | 3 +- src/backup.c | 52 ++++++++++++++- src/parsexlog.c | 20 +++++- src/stream.c | 9 ++- tests/archive.py | 11 +++- tests/auth_test.py | 50 +++++++++----- tests/helpers/ptrack_helpers.py | 46 ++++++++----- tests/ptrack.py | 110 +++++++++++++++++++++---------- tests/restore.py | 113 +++++++++++++++++++++----------- 9 files changed, 302 insertions(+), 112 deletions(-) diff --git a/.travis.yml b/.travis.yml index 9e48c9cab..142c01f93 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,7 +26,8 @@ notifications: # Default MODE is basic, i.e. all tests with PG_PROBACKUP_TEST_BASIC=ON env: - - PG_VERSION=15 PG_BRANCH=master PTRACK_PATCH_PG_BRANCH=master + - PG_VERSION=16 PG_BRANCH=master PTRACK_PATCH_PG_BRANCH=master + - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=REL_15_STABLE - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE diff --git a/src/backup.c b/src/backup.c index c3f3e888d..b3a44ab6e 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1030,12 +1030,20 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, uint32 lsn_lo; params[0] = label; +#if PG_VERSION_NUM >= 150000 + elog(INFO, "wait for pg_backup_start()"); +#else elog(INFO, "wait for pg_start_backup()"); +#endif /* 2nd argument is 'fast'*/ params[1] = smooth ? 
"false" : "true"; res = pgut_execute(conn, +#if PG_VERSION_NUM >= 150000 + "SELECT pg_catalog.pg_backup_start($1, $2)", +#else "SELECT pg_catalog.pg_start_backup($1, $2, false)", +#endif 2, params); @@ -1575,6 +1583,14 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " labelfile," " spcmapfile" " FROM pg_catalog.pg_stop_backup(false, false)", + stop_backup_on_master_after15_query[] = + "SELECT" + " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," + " current_timestamp(0)::timestamptz," + " lsn," + " labelfile," + " spcmapfile" + " FROM pg_catalog.pg_backup_stop(false)", /* * In case of backup from replica we do not trust minRecPoint * and stop_backup LSN, so we use latest replayed LSN as STOP LSN. @@ -1586,12 +1602,26 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " pg_catalog.pg_last_wal_replay_lsn()," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false, false)"; + " FROM pg_catalog.pg_stop_backup(false, false)", + stop_backup_on_replica_after15_query[] = + "SELECT" + " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," + " current_timestamp(0)::timestamptz," + " pg_catalog.pg_last_wal_replay_lsn()," + " labelfile," + " spcmapfile" + " FROM pg_catalog.pg_backup_stop(false)"; const char * const stop_backup_query = - is_started_on_replica ? + server_version >= 150000 ? + (is_started_on_replica ? + stop_backup_on_replica_after15_query : + stop_backup_on_master_after15_query + ) : + (is_started_on_replica ? 
stop_backup_on_replica_query : - stop_backup_on_master_query; + stop_backup_on_master_query + ); bool sent = false; /* Make proper timestamp format for parse_time(recovery_time) */ @@ -1606,7 +1636,11 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica */ sent = pgut_send(conn, stop_backup_query, 0, NULL, WARNING); if (!sent) +#if PG_VERSION_NUM >= 150000 + elog(ERROR, "Failed to send pg_backup_stop query"); +#else elog(ERROR, "Failed to send pg_stop_backup query"); +#endif /* After we have sent pg_stop_backup, we don't need this callback anymore */ pgut_atexit_pop(backup_stopbackup_callback, &stop_callback_params); @@ -1652,7 +1686,11 @@ pg_stop_backup_consume(PGconn *conn, int server_version, if (interrupted) { pgut_cancel(conn); +#if PG_VERSION_NUM >= 150000 + elog(ERROR, "interrupted during waiting for pg_backup_stop"); +#else elog(ERROR, "interrupted during waiting for pg_stop_backup"); +#endif } if (pg_stop_backup_timeout == 1) @@ -1665,7 +1703,11 @@ pg_stop_backup_consume(PGconn *conn, int server_version, if (pg_stop_backup_timeout > timeout) { pgut_cancel(conn); +#if PG_VERSION_NUM >= 150000 + elog(ERROR, "pg_backup_stop doesn't answer in %d seconds, cancel it", timeout); +#else elog(ERROR, "pg_stop_backup doesn't answer in %d seconds, cancel it", timeout); +#endif } } else @@ -1677,7 +1719,11 @@ pg_stop_backup_consume(PGconn *conn, int server_version, /* Check successfull execution of pg_stop_backup() */ if (!query_result) +#if PG_VERSION_NUM >= 150000 + elog(ERROR, "pg_backup_stop() failed"); +#else elog(ERROR, "pg_stop_backup() failed"); +#endif else { switch (PQresultStatus(query_result)) diff --git a/src/parsexlog.c b/src/parsexlog.c index 39fb64f0a..e8010796e 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -29,8 +29,13 @@ * RmgrNames is an array of resource manager names, to make error messages * a bit nicer. 
*/ +#if PG_VERSION_NUM >= 150000 +#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask,decode) \ + name, +#else #define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask) \ name, +#endif static const char *RmgrNames[RM_MAX_ID + 1] = { #include "access/rmgrlist.h" @@ -1764,7 +1769,12 @@ extractPageInfo(XLogReaderState *record, XLogReaderData *reader_data, /* Is this a special record type that I recognize? */ - if (rmid == RM_DBASE_ID && rminfo == XLOG_DBASE_CREATE) + if (rmid == RM_DBASE_ID +#if PG_VERSION_NUM >= 150000 + && (rminfo == XLOG_DBASE_CREATE_WAL_LOG || rminfo == XLOG_DBASE_CREATE_FILE_COPY)) +#else + && rminfo == XLOG_DBASE_CREATE) +#endif { /* * New databases can be safely ignored. They would be completely @@ -1818,13 +1828,21 @@ extractPageInfo(XLogReaderState *record, XLogReaderData *reader_data, RmgrNames[rmid], info); } +#if PG_VERSION_NUM >= 150000 + for (block_id = 0; block_id <= record->record->max_block_id; block_id++) +#else for (block_id = 0; block_id <= record->max_block_id; block_id++) +#endif { RelFileNode rnode; ForkNumber forknum; BlockNumber blkno; +#if PG_VERSION_NUM >= 150000 + if (!XLogRecGetBlockTagExtended(record, block_id, &rnode, &forknum, &blkno, NULL)) +#else if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno)) +#endif continue; /* We only care about the main fork; others are copied as is */ diff --git a/src/stream.c b/src/stream.c index e2e016f4d..df4606c0a 100644 --- a/src/stream.c +++ b/src/stream.c @@ -260,11 +260,18 @@ StreamLog(void *arg) ctl.synchronous = false; ctl.mark_done = false; +#if PG_VERSION_NUM >= 150000 + ctl.walmethod = CreateWalDirectoryMethod( + stream_arg->basedir, + PG_COMPRESSION_NONE, + 0, + false); +#else /* PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 150000 */ ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, -// (instance_config.compress_alg == NONE_COMPRESS) ? 
0 : instance_config.compress_level, 0, false); +#endif /* PG_VERSION_NUM >= 150000 */ ctl.replication_slot = replication_slot; ctl.stop_socket = PGINVALID_SOCKET; ctl.do_sync = false; /* We sync all files at the end of backup */ diff --git a/tests/archive.py b/tests/archive.py index be5e33fbc..9f02a04a9 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -330,9 +330,14 @@ def test_pgpro434_4(self): with open(log_file, 'r') as f: log_content = f.read() - self.assertIn( - "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", - log_content) + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", + log_content) + else: + self.assertIn( + "ERROR: pg_backup_stop doesn't answer in 60 seconds, cancel it", + log_content) log_file = os.path.join(node.logs_dir, 'postgresql.log') with open(log_file, 'r') as f: diff --git a/tests/auth_test.py b/tests/auth_test.py index 16c73308f..3cec09211 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -51,16 +51,29 @@ def test_backup_via_unprivileged_user(self): 1, 0, "Expecting Error due to missing grant on EXECUTE.") except ProbackupException as e: - self.assertIn( - "ERROR: query failed: ERROR: permission denied " - "for function pg_start_backup", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION" - " pg_start_backup(text, boolean, boolean) TO backup;") + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: query failed: ERROR: permission denied " + "for function pg_start_backup", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + else: + self.assertIn( + "ERROR: query failed: ERROR: permission denied " + "for function pg_backup_start", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + if self.get_version(node) < 150000: + 
node.safe_psql( + "postgres", + "GRANT EXECUTE ON FUNCTION" + " pg_start_backup(text, boolean, boolean) TO backup;") + else: + node.safe_psql( + "postgres", + "GRANT EXECUTE ON FUNCTION" + " pg_backup_start(text, boolean) TO backup;") node.safe_psql( 'postgres', @@ -92,11 +105,18 @@ def test_backup_via_unprivileged_user(self): 1, 0, "Expecting Error due to missing grant on EXECUTE.") except ProbackupException as e: - self.assertIn( - "ERROR: query failed: ERROR: permission denied " - "for function pg_stop_backup", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: query failed: ERROR: permission denied " + "for function pg_stop_backup", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + else: + self.assertIn( + "ERROR: query failed: ERROR: permission denied " + "for function pg_backup_stop", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) node.safe_psql( "postgres", diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 2401261d6..2fa3ccfec 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -446,21 +446,37 @@ def simple_bootstrap(self, node, role) -> None: 'postgres', 'CREATE ROLE {0} WITH LOGIN REPLICATION'.format(role)) - # PG >= 10 - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) + if self.get_version(node) < 150000: + node.safe_psql( + 'postgres', + 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) + # >= 15 + else: + node.safe_psql( + 'postgres', + 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' + 
'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False): res = node.execute( diff --git a/tests/ptrack.py b/tests/ptrack.py index b5cc384bb..94980a90f 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -513,41 +513,78 @@ def test_ptrack_unprivileged(self): "postgres", "CREATE DATABASE backupdb") - # PG >= 10 - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # < 15 + if self.get_version(node) < 150000: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON 
FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog 
TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) node.safe_psql( "backupdb", @@ -566,7 +603,8 @@ def test_ptrack_unprivileged(self): if ProbackupTest.enterprise: node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;') self.backup_node( backup_dir, 'node', node, diff --git a/tests/restore.py b/tests/restore.py index 3f50ae71e..6ecf895ae 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -3189,42 +3189,80 @@ def test_missing_database_map(self): "postgres", "CREATE DATABASE backupdb") - # PG >= 10 - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " 
- "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # < 15 + if self.get_version(node) >= 100000 and self.get_version(node) < 150000: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE 
ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT 
EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: # TODO why backup works without these grants ? @@ -3241,7 +3279,8 @@ def test_missing_database_map(self): node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") # FULL backup without database_map backup_id = self.backup_node( From 4ecb11f45db906375358be9c9ad0106434169374 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Thu, 20 Oct 2022 07:16:40 +0300 Subject: [PATCH 064/339] [PBCKP-234] proper pioListDir usage tags: pb_probackup --- src/archive.c | 2 +- src/backup.c | 11 +- src/catalog.c | 3 +- src/catchup.c | 12 +- src/checkdb.c | 4 +- src/delete.c | 32 +----- src/dir.c | 249 +++++++++++++++++++++++----------------- src/merge.c | 30 +---- src/pg_probackup.h | 7 +- src/restore.c | 5 +- src/utils/file.c | 278 ++++++++++++++++++++++++++++++--------------- src/utils/file.h | 14 ++- src/utils/parray.c | 28 +++++ src/utils/parray.h | 4 + 14 files changed, 395 insertions(+), 284 deletions(-) diff --git a/src/archive.c b/src/archive.c index 1ad45e61f..90c54c15a 100644 --- a/src/archive.c +++ b/src/archive.c @@ -646,7 +646,7 @@ setup_push_filelist(const char *archive_status_dir, const char *first_file, /* get list of files from archive_status */ status_files = 
parray_new(); - dir_list_file(status_files, archive_status_dir, false, false, false, false, true, 0, FIO_DB_HOST); + db_list_dir(status_files, archive_status_dir, false, false, 0); parray_qsort(status_files, pgFileCompareName); for (i = 0; i < parray_num(status_files); i++) diff --git a/src/backup.c b/src/backup.c index b3a44ab6e..5f8a2eecd 100644 --- a/src/backup.c +++ b/src/backup.c @@ -269,8 +269,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, join_path_components(external_prefix, current.root_dir, EXTERNAL_DIR); /* list files with the logical path. omit $PGDATA */ - fio_list_dir(backup_files_list, instance_config.pgdata, - true, true, false, backup_logs, true, 0); + db_list_dir(backup_files_list, instance_config.pgdata, true, backup_logs, 0); + exclude_files(backup_files_list, backup_logs); /* * Get database_map (name to oid) for use in partial restore feature. @@ -288,12 +288,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, { /* External dirs numeration starts with 1. 
* 0 value is not external dir */ - if (fio_is_remote(FIO_DB_HOST)) - fio_list_dir(backup_files_list, parray_get(external_dirs, i), - false, true, false, false, true, i+1); - else - dir_list_file(backup_files_list, parray_get(external_dirs, i), - false, true, false, false, true, i+1, FIO_LOCAL_HOST); + db_list_dir(backup_files_list, parray_get(external_dirs, i), false, false, i+1); } } diff --git a/src/catalog.c b/src/catalog.c index fd49c70e6..3c0396e1c 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1525,8 +1525,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) char end_segno_str[MAXFNAMELEN]; /* read all xlog files that belong to this archive */ - dir_list_file(xlog_files_list, instanceState->instance_wal_subdir_path, - false, true, false, false, true, 0, FIO_BACKUP_HOST); + backup_list_dir(xlog_files_list, instanceState->instance_wal_subdir_path); parray_qsort(xlog_files_list, pgFileCompareName); timelineinfos = parray_new(); diff --git a/src/catchup.c b/src/catchup.c index 08bc039f9..8034fba0a 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -647,9 +647,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (current.backup_mode != BACKUP_MODE_FULL) { dest_filelist = parray_new(); - dir_list_file(dest_filelist, dest_pgdata, - true, true, false, backup_logs, true, 0, FIO_LOCAL_HOST); + db_list_dir(dest_filelist, dest_pgdata, true, backup_logs, 0); filter_filelist(dest_filelist, dest_pgdata, exclude_absolute_paths_list, exclude_relative_paths_list, "Destination"); + exclude_files(dest_filelist, backup_logs); // fill dest_redo.lsn and dest_redo.tli get_redo(FIO_LOCAL_HOST, dest_pgdata, &dest_redo); @@ -714,12 +714,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, source_filelist = parray_new(); /* list files with the logical path. 
omit $PGDATA */ - if (fio_is_remote(FIO_DB_HOST)) - fio_list_dir(source_filelist, source_pgdata, - true, true, false, backup_logs, true, 0); - else - dir_list_file(source_filelist, source_pgdata, - true, true, false, backup_logs, true, 0, FIO_LOCAL_HOST); + db_list_dir(source_filelist, source_pgdata, true, backup_logs, 0); + exclude_files(source_filelist, backup_logs); //REVIEW FIXME. Let's fix that before release. // TODO what if wal is not a dir (symlink to a dir)? diff --git a/src/checkdb.c b/src/checkdb.c index f1a5fcf78..b3e2a9060 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -207,8 +207,8 @@ do_block_validation(char *pgdata, uint32 checksum_version) files_list = parray_new(); /* list files with the logical path. omit $PGDATA */ - dir_list_file(files_list, pgdata, true, true, - false, false, true, 0, FIO_DB_HOST); + db_list_dir(files_list, pgdata, true, false, 0); + exclude_files(files_list, false); /* * Sort pathname ascending. diff --git a/src/delete.c b/src/delete.c index d6778d661..b6ed23966 100644 --- a/src/delete.c +++ b/src/delete.c @@ -731,11 +731,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run) void delete_backup_files(pgBackup *backup) { - size_t i; char timestamp[100]; - parray *files; - size_t num_files; - char full_path[MAXPGPATH]; /* * If the backup was deleted already, there is nothing to do. 
@@ -761,32 +757,8 @@ delete_backup_files(pgBackup *backup) */ write_backup_status(backup, BACKUP_STATUS_DELETING, false); - /* list files to be deleted */ - files = parray_new(); - dir_list_file(files, backup->root_dir, false, false, true, false, false, 0, FIO_BACKUP_HOST); - - /* delete leaf node first */ - parray_qsort(files, pgFileCompareRelPathWithExternalDesc); - num_files = parray_num(files); - for (i = 0; i < num_files; i++) - { - pgFile *file = (pgFile *) parray_get(files, i); - - join_path_components(full_path, backup->root_dir, file->rel_path); - - if (interrupted) - elog(ERROR, "interrupted during delete backup"); - - if (progress) - elog(INFO, "Progress: (%zd/%zd). Delete file \"%s\"", - i + 1, num_files, full_path); - - if (fio_remove(FIO_BACKUP_HOST, full_path, false) != 0) - elog(ERROR, "Cannot remove file or directory \"%s\": %s", full_path, strerror(errno)); - } - - parray_walk(files, pgFileFree); - parray_free(files); + pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); + $i(pioRemoveDir, drive, .root = backup->root_dir, .root_as_well = true); backup->status = BACKUP_STATUS_DELETED; return; diff --git a/src/dir.c b/src/dir.c index 4920b89ea..0c793753c 100644 --- a/src/dir.c +++ b/src/dir.c @@ -117,10 +117,16 @@ typedef struct TablespaceCreatedList TablespaceCreatedListCell *tail; } TablespaceCreatedList; +typedef struct exclude_cb_ctx { + bool backup_logs; + size_t pref_len; + char exclude_dir_content_pref[MAXPGPATH]; +} exclude_cb_ctx; + static char dir_check_file(pgFile *file, bool backup_logs); static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, - bool exclude, bool follow_symlink, bool backup_logs, + bool handle_tablespaces, bool follow_symlink, bool backup_logs, bool skip_hidden, int external_dir_num, fio_location location); static void opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, const char *type); @@ -128,6 +134,7 @@ static void cleanup_tablespace(const char *path); 
static void control_string_bad_format(const char* str); +static bool exclude_files_cb(void *value, void *exclude_args); /* Tablespace mapping */ static TablespaceList tablespace_dirs = {NULL, NULL}; @@ -552,9 +559,8 @@ db_map_entry_free(void *entry) * TODO: make it strictly local */ void -dir_list_file(parray *files, const char *root, bool exclude, bool follow_symlink, - bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num, - fio_location location) +dir_list_file(parray *files, const char *root, bool handle_tablespaces, bool follow_symlink, + bool backup_logs, bool skip_hidden, int external_dir_num, fio_location location) { pgFile *file; @@ -577,14 +583,11 @@ dir_list_file(parray *files, const char *root, bool exclude, bool follow_symlink elog(WARNING, "Skip \"%s\": unexpected file format", root); return; } - if (add_root) - parray_append(files, file); - dir_list_file_internal(files, file, root, exclude, follow_symlink, + dir_list_file_internal(files, file, root, handle_tablespaces, follow_symlink, backup_logs, skip_hidden, external_dir_num, location); - if (!add_root) - pgFileFree(file); + pgFileFree(file); } #define CHECK_FALSE 0 @@ -656,54 +659,6 @@ dir_check_file(pgFile *file, bool backup_logs) } } - /* - * Do not copy tablespaces twice. It may happen if the tablespace is located - * inside the PGDATA. 
- */ - if (S_ISDIR(file->mode) && - strcmp(file->name, TABLESPACE_VERSION_DIRECTORY) == 0) - { - Oid tblspcOid; - char tmp_rel_path[MAXPGPATH]; - - /* - * Valid path for the tablespace is - * pg_tblspc/tblsOid/TABLESPACE_VERSION_DIRECTORY - */ - if (!path_is_prefix_of_path(PG_TBLSPC_DIR, file->rel_path)) - return CHECK_FALSE; - sscanf_res = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%s", - &tblspcOid, tmp_rel_path); - if (sscanf_res == 0) - return CHECK_FALSE; - } - - if (in_tablespace) - { - char tmp_rel_path[MAXPGPATH]; - - sscanf_res = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%[^/]/%u/", - &(file->tblspcOid), tmp_rel_path, - &(file->dbOid)); - - /* - * We should skip other files and directories rather than - * TABLESPACE_VERSION_DIRECTORY, if this is recursive tablespace. - */ - if (sscanf_res == 2 && strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) != 0) - return CHECK_FALSE; - } - else if (path_is_prefix_of_path("global", file->rel_path)) - { - file->tblspcOid = GLOBALTABLESPACE_OID; - } - else if (path_is_prefix_of_path("base", file->rel_path)) - { - file->tblspcOid = DEFAULTTABLESPACE_OID; - - sscanf(file->rel_path, "base/%u/", &(file->dbOid)); - } - /* Do not backup ptrack_init files */ if (S_ISREG(file->mode) && strcmp(file->name, "ptrack_init") == 0) return CHECK_FALSE; @@ -754,23 +709,105 @@ dir_check_file(pgFile *file, bool backup_logs) } /* - * List files in parent->path directory. If "exclude" is true do not add into - * "files" files from pgdata_exclude_files and directories from - * pgdata_exclude_dir. + * Excluding default files from the files list. + * Input: + * parray *files - an array of pgFile* to filter. 
+ * croterion_fn - a callback that filters things out + * Output: + * true - if the file must be deleted from the list + * false - otherwise + */ + +static bool +exclude_files_cb(void *value, void *exclude_args) { + pgFile *file = (pgFile*) value; + exclude_cb_ctx *ex_ctx = (exclude_cb_ctx*) exclude_args; + + /* + * Check the file relative path for previously excluded dir prefix. These files + * should not be in the list, only their empty parent directory, see dir_check_file. + * + * Assuming that the excluded dir is ALWAYS followed by its content like this: + * pref/dir/ + * pref/dir/file1 + * pref/dir/file2 + * pref/dir/file3 + * ... + * we can make prefix checks only for files that subsequently follow the excluded dir + * and avoid unnecessary checks for the rest of the files. So we store the prefix length, + * update it and the prefix itself once we've got a CHECK_EXCLUDE_FALSE status code, + * keep doing prefix checks while there are files in that directory and set prefix length + * to 0 once they are gone. + */ + if(ex_ctx->pref_len > 0 + && strncmp(ex_ctx->exclude_dir_content_pref, file->rel_path, ex_ctx->pref_len) == 0) { + return true; + } else { + memset(ex_ctx->exclude_dir_content_pref, 0, ex_ctx->pref_len); + ex_ctx->pref_len = 0; + } + + int check_res = dir_check_file(file, ex_ctx->backup_logs); + + switch(check_res) { + case CHECK_FALSE: + return true; + break; + case CHECK_TRUE:; + return false; + break; + case CHECK_EXCLUDE_FALSE: + // since the excluded dir always goes before its contents, memorize it + // and use it for further files filtering. + strcpy(ex_ctx->exclude_dir_content_pref, file->rel_path); + ex_ctx->pref_len = strlen(file->rel_path); + return false; + break; + default: + // Should not get there normally. + assert(false); + return false; + break; + } + + // Should not get there as well. 
+ return false; +} + +void exclude_files(parray *files, bool backup_logs) { + exclude_cb_ctx ctx = { + .pref_len = 0, + .backup_logs = backup_logs, + .exclude_dir_content_pref = "\0", + }; + + parray_remove_if(files, exclude_files_cb, (void*)&ctx, pgFileFree); +} + +/* + * List files in parent->path directory. + * If "handle_tablespaces" is true, handle recursive tablespaces + * and the ones located inside pgdata. + * If "follow_symlink" is true, follow symlinks so that the + * fio_stat call fetches the info from the file pointed to by the + * symlink, not from the symlink itself. * * TODO: should we check for interrupt here ? */ static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, - bool exclude, bool follow_symlink, bool backup_logs, + bool handle_tablespaces, bool follow_symlink, bool backup_logs, bool skip_hidden, int external_dir_num, fio_location location) { DIR *dir; struct dirent *dent; + bool in_tablespace = false; if (!S_ISDIR(parent->mode)) elog(ERROR, "\"%s\" is not a directory", parent_dir); + in_tablespace = path_is_prefix_of_path(PG_TBLSPC_DIR, parent->rel_path); + /* Open directory and list contents */ dir = fio_opendir(location, parent_dir); if (dir == NULL) @@ -790,13 +827,12 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, pgFile *file; char child[MAXPGPATH]; char rel_child[MAXPGPATH]; - char check_res; join_path_components(child, parent_dir, dent->d_name); join_path_components(rel_child, parent->rel_path, dent->d_name); - file = pgFileNew(child, rel_child, follow_symlink, external_dir_num, - location); + file = pgFileNew(child, rel_child, follow_symlink, + external_dir_num, location); if (file == NULL) continue; @@ -809,8 +845,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, } /* skip hidden files and directories */ - if (skip_hidden && file->name[0] == '.') - { + if (skip_hidden && file->name[0] == '.') { elog(WARNING, "Skip hidden file: 
'%s'", child); pgFileFree(file); continue; @@ -827,21 +862,50 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, continue; } - if (exclude) - { - check_res = dir_check_file(file, backup_logs); - if (check_res == CHECK_FALSE) - { - /* Skip */ - pgFileFree(file); - continue; - } - else if (check_res == CHECK_EXCLUDE_FALSE) + if(handle_tablespaces) { + /* + * Do not copy tablespaces twice. It may happen if the tablespace is located + * inside the PGDATA. + */ + if (S_ISDIR(file->mode) && + strcmp(file->name, TABLESPACE_VERSION_DIRECTORY) == 0) { - /* We add the directory itself which content was excluded */ - parray_append(files, file); - continue; + Oid tblspcOid; + char tmp_rel_path[MAXPGPATH]; + int sscanf_res; + + /* + * Valid path for the tablespace is + * pg_tblspc/tblsOid/TABLESPACE_VERSION_DIRECTORY + */ + if (!path_is_prefix_of_path(PG_TBLSPC_DIR, file->rel_path)) + continue; + sscanf_res = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%s", + &tblspcOid, tmp_rel_path); + if (sscanf_res == 0) + continue; } + + if (in_tablespace) { + char tmp_rel_path[MAXPGPATH]; + ssize_t sscanf_res; + + sscanf_res = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%[^/]/%u/", + &(file->tblspcOid), tmp_rel_path, + &(file->dbOid)); + + /* + * We should skip other files and directories rather than + * TABLESPACE_VERSION_DIRECTORY, if this is recursive tablespace. + */ + if (sscanf_res == 2 && strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) != 0) + continue; + } else if (path_is_prefix_of_path("global", file->rel_path)) { + file->tblspcOid = GLOBALTABLESPACE_OID; + } else if (path_is_prefix_of_path("base", file->rel_path)) { + file->tblspcOid = DEFAULTTABLESPACE_OID; + sscanf(file->rel_path, "base/%u/", &(file->dbOid)); + } } parray_append(files, file); @@ -851,7 +915,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, * recursively. 
*/ if (S_ISDIR(file->mode)) - dir_list_file_internal(files, file, child, exclude, follow_symlink, + dir_list_file_internal(files, file, child, handle_tablespaces, follow_symlink, backup_logs, skip_hidden, external_dir_num, location); } @@ -1845,29 +1909,8 @@ read_database_map(pgBackup *backup) void cleanup_tablespace(const char *path) { - int i; - char fullpath[MAXPGPATH]; - parray *files = parray_new(); - - fio_list_dir(files, path, false, false, false, false, false, 0); - - /* delete leaf node first */ - parray_qsort(files, pgFileCompareRelPathWithExternalDesc); - - for (i = 0; i < parray_num(files); i++) - { - pgFile *file = (pgFile *) parray_get(files, i); - - join_path_components(fullpath, path, file->rel_path); - - if (fio_remove(FIO_DB_HOST, fullpath, true) == 0) - elog(LOG, "Deleted file \"%s\"", fullpath); - else - elog(ERROR, "Cannot delete file or directory \"%s\": %s", fullpath, strerror(errno)); - } - - parray_walk(files, pgFileFree); - parray_free(files); + pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); + $i(pioRemoveDir, drive, .root = path, .root_as_well = false); } /* diff --git a/src/merge.c b/src/merge.c index f64b72611..e6e4f650f 100644 --- a/src/merge.c +++ b/src/merge.c @@ -1127,33 +1127,6 @@ merge_files(void *arg) return NULL; } -/* Recursively delete a directory and its contents */ -static void -remove_dir_with_files(const char *path) -{ - parray *files = parray_new(); - int i; - char full_path[MAXPGPATH]; - - dir_list_file(files, path, false, false, true, false, false, 0, FIO_LOCAL_HOST); - parray_qsort(files, pgFileCompareRelPathWithExternalDesc); - for (i = 0; i < parray_num(files); i++) - { - pgFile *file = (pgFile *) parray_get(files, i); - - join_path_components(full_path, path, file->rel_path); - - if (fio_remove(FIO_LOCAL_HOST, full_path, true) == 0) - elog(LOG, "Deleted \"%s\"", full_path); - else - elog(ERROR, "Cannot delete file or directory \"%s\": %s", full_path, strerror(errno)); - } - - /* cleanup */ - 
parray_walk(files, pgFileFree); - parray_free(files); -} - /* Get index of external directory */ static int get_external_index(const char *key, const parray *list) @@ -1187,7 +1160,8 @@ reorder_external_dirs(pgBackup *to_backup, parray *to_external, { char old_path[MAXPGPATH]; makeExternalDirPathByNum(old_path, externaldir_template, i + 1); - remove_dir_with_files(old_path); + pioDrive_i drive = pioDriveForLocation(FIO_LOCAL_HOST); + $i(pioRemoveDir, drive, .root = old_path, .root_as_well = true); } else if (from_num != i + 1) { diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 257a917c8..711a60e82 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -977,9 +977,9 @@ extern const char* deparse_compress_alg(int alg); extern bool get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory); extern bool get_control_value_str(const char *str, const char *name, char *value_str, size_t value_str_size, bool is_mandatory); -extern void dir_list_file(parray *files, const char *root, bool exclude, - bool follow_symlink, bool add_root, bool backup_logs, - bool skip_hidden, int external_dir_num, fio_location location); +extern void dir_list_file(parray *files, const char *root, bool handle_tablespaces, + bool follow_symlink, bool backup_logs, bool skip_hidden, + int external_dir_num, fio_location location); extern const char *get_tablespace_mapping(const char *dir); extern void create_data_directories(parray *dest_files, @@ -1042,6 +1042,7 @@ extern int pgPrefixCompareString(const void *str1, const void *str2); extern int pgCompareOid(const void *f1, const void *f2); extern void pfilearray_clear_locks(parray *file_list); extern void set_forkname(pgFile *file); +extern void exclude_files(parray *files, bool backup_logs); /* in data.c */ extern bool check_data_file(ConnectionArgs *arguments, pgFile *file, diff --git a/src/restore.c b/src/restore.c index 9113a15da..ebd9bae22 100644 --- a/src/restore.c +++ b/src/restore.c @@ 
-858,7 +858,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, elog(INFO, "Extracting the content of destination directory for incremental restore"); time(&start_time); - fio_list_dir(pgdata_files, pgdata_path, false, true, false, false, true, 0); + db_list_dir(pgdata_files, pgdata_path, false, false, 0); /* * TODO: @@ -878,8 +878,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, char *external_path = parray_get(external_dirs, i); parray *external_files = parray_new(); - fio_list_dir(external_files, external_path, - false, true, false, false, true, i+1); + db_list_dir(external_files, external_path, false, false, i+1); parray_concat(pgdata_files, external_files); parray_free(external_files); diff --git a/src/utils/file.c b/src/utils/file.c index 027df3f13..82df5266c 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1,5 +1,6 @@ #include #include +#include #include "pg_probackup.h" #include @@ -33,14 +34,18 @@ typedef struct typedef struct { char path[MAXPGPATH]; - bool exclude; + bool handle_tablespaces; bool follow_symlink; - bool add_root; bool backup_logs; bool skip_hidden; int external_dir_num; } fio_list_dir_request; +typedef struct { + char path[MAXPGPATH]; + bool root_as_well; +} fio_remove_dir_request; + typedef struct { mode_t mode; @@ -2749,88 +2754,41 @@ fio_send_file_impl(int out, const char* path) close(fd); } -/* Compile the array of files located on remote machine in directory root */ -static void -fio_list_dir_internal(parray *files, const char *root, bool exclude, - bool follow_symlink, bool add_root, bool backup_logs, - bool skip_hidden, int external_dir_num) -{ - fio_header hdr; - fio_list_dir_request req; - char *buf = pgut_malloc(CHUNK_SIZE); - - /* Send to the agent message with parameters for directory listing */ - snprintf(req.path, MAXPGPATH, "%s", root); - req.exclude = exclude; - req.follow_symlink = follow_symlink; - req.add_root = add_root; - req.backup_logs = backup_logs; - req.skip_hidden = skip_hidden; 
- req.external_dir_num = external_dir_num; - - hdr.cop = FIO_LIST_DIR; - hdr.size = sizeof(req); - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, &req, hdr.size), hdr.size); - - for (;;) - { - /* receive data */ - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (hdr.cop == FIO_SEND_FILE_EOF) - { - /* the work is done */ - break; - } - else if (hdr.cop == FIO_SEND_FILE) - { - pgFile *file = NULL; - fio_pgFile fio_file; +void db_list_dir(parray *files, const char *root, bool handle_tablespaces, + bool backup_logs, int external_dir_num) { + pioDrive_i drive = pioDriveForLocation(FIO_DB_HOST); + $i(pioListDir, drive, .files = files, .root = root, .handle_tablespaces = handle_tablespaces, + .symlink_and_hidden = true, .backup_logs = backup_logs, .skip_hidden = true, + .external_dir_num = external_dir_num); +} - /* receive rel_path */ - IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - file = pgFileInit(buf); - - /* receive metainformation */ - IO_CHECK(fio_read_all(fio_stdin, &fio_file, sizeof(fio_file)), sizeof(fio_file)); - - file->mode = fio_file.mode; - file->size = fio_file.size; - file->mtime = fio_file.mtime; - file->is_datafile = fio_file.is_datafile; - file->tblspcOid = fio_file.tblspcOid; - file->dbOid = fio_file.dbOid; - file->relOid = fio_file.relOid; - file->forkName = fio_file.forkName; - file->segno = fio_file.segno; - file->external_dir_num = fio_file.external_dir_num; - - if (fio_file.linked_len > 0) - { - IO_CHECK(fio_read_all(fio_stdin, buf, fio_file.linked_len), fio_file.linked_len); +void backup_list_dir(parray *files, const char *root) { + pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); + $i(pioListDir, drive, .files = files, .root = root, .handle_tablespaces = false, + .symlink_and_hidden = false, .backup_logs = false, .skip_hidden = false, + .external_dir_num = 0); +} - file->linked = pgut_malloc(fio_file.linked_len); - 
snprintf(file->linked, fio_file.linked_len, "%s", buf); - } +/* + * WARNING! this function is not paired with fio_remove_dir + * because there is no such function. Instead, it is paired + * with pioRemoteDrive_pioRemoveDir, see PBCKP-234 for further details + */ +static void +fio_remove_dir_impl(int out, char* buf) { + fio_remove_dir_request *frdr = (fio_remove_dir_request *)buf; + pioDrive_i drive = pioDriveForLocation(FIO_LOCAL_HOST); -// elog(INFO, "Received file: %s, mode: %u, size: %lu, mtime: %lu", -// file->rel_path, file->mode, file->size, file->mtime); + // In an essence this all is just a wrapper for a pioRemoveDir call on a local drive + $i(pioRemoveDir, drive, .root = frdr->path, .root_as_well = frdr->root_as_well); - parray_append(files, file); - } - else - { - /* TODO: fio_disconnect may get assert fail when running after this */ - elog(ERROR, "Remote agent returned message of unexpected type: %i", hdr.cop); - } - } + fio_header hdr; + hdr.cop = FIO_REMOVE_DIR; + hdr.arg = 0; - pg_free(buf); + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } - /* * To get the arrays of files we use the same function dir_list_file(), * that is used for local backup. 
@@ -2858,8 +2816,8 @@ fio_list_dir_impl(int out, char* buf) */ instance_config.logger.log_level_console = ERROR; - dir_list_file(file_files, req->path, req->exclude, req->follow_symlink, - req->add_root, req->backup_logs, req->skip_hidden, + dir_list_file(file_files, req->path, req->handle_tablespaces, + req->follow_symlink, req->backup_logs, req->skip_hidden, req->external_dir_num, FIO_LOCAL_HOST); /* send information about files to the main process */ @@ -2906,20 +2864,6 @@ fio_list_dir_impl(int out, char* buf) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } -/* Wrapper for directory listing */ -void -fio_list_dir(parray *files, const char *root, bool exclude, - bool follow_symlink, bool add_root, bool backup_logs, - bool skip_hidden, int external_dir_num) -{ - if (fio_is_remote(FIO_DB_HOST)) - fio_list_dir_internal(files, root, exclude, follow_symlink, add_root, - backup_logs, skip_hidden, external_dir_num); - else - dir_list_file(files, root, exclude, follow_symlink, add_root, - backup_logs, skip_hidden, external_dir_num, FIO_LOCAL_HOST); -} - PageState * fio_get_checksum_map(fio_location location, const char *fullpath, uint32 checksum_version, int n_blocks, XLogRecPtr dest_stop_lsn, BlockNumber segmentno) @@ -3303,6 +3247,9 @@ fio_communicate(int in, int out) case FIO_LIST_DIR: fio_list_dir_impl(out, buf); break; + case FIO_REMOVE_DIR: + fio_remove_dir_impl(out, buf); + break; case FIO_SEND_PAGES: /* buf contain fio_send_request header and bitmap. 
*/ fio_send_pages_impl(out, buf); @@ -3590,6 +3537,55 @@ pioLocalDrive_pioIsRemote(VSelf) return false; } +static void +pioLocalDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_tablespaces, + bool follow_symlink, bool backup_logs, bool skip_hidden, + int external_dir_num) { + FOBJ_FUNC_ARP(); + dir_list_file(files, root, handle_tablespaces, follow_symlink, backup_logs, + skip_hidden, external_dir_num, FIO_LOCAL_HOST); +} + +static void +pioLocalDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { + FOBJ_FUNC_ARP(); + Self(pioLocalDrive); + char full_path[MAXPGPATH]; + /* list files to be deleted */ + parray* files = parray_new(); + $(pioListDir, self, .files = files, .root = root, .handle_tablespaces = false, + .symlink_and_hidden = false, .backup_logs = false, .skip_hidden = false, .external_dir_num = 0); + + + // adding the root directory because it must be deleted too + if(root_as_well) + parray_append(files, pgFileNew(root, "", false, 0, FIO_LOCAL_HOST)); + + /* delete leaf node first */ + parray_qsort(files, pgFileCompareRelPathWithExternalDesc); + size_t num_files = parray_num(files); + for (int i = 0; i < num_files; i++) + { + pgFile *file = (pgFile *) parray_get(files, i); + + join_path_components(full_path, root, file->rel_path); + + if (interrupted) + elog(ERROR, "interrupted during the directory deletion: %s", full_path); + + if (progress) + elog(INFO, "Progress: (%d/%zd). 
Delete file \"%s\"", + i + 1, num_files, full_path); + + err_i err = $(pioRemove, self, full_path, false); + if($haserr(err)) + elog(ERROR, "Cannot remove file or directory \"%s\": %s", full_path, $errmsg(err)); + } + + parray_walk(files, pgFileFree); + parray_free(files); +} + /* LOCAL FILE */ static void pioLocalFile_fobjDispose(VSelf) @@ -3857,6 +3853,102 @@ pioRemoteDrive_pioIsRemote(VSelf) return true; } +static void +pioRemoteDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_tablespaces, + bool follow_symlink, bool backup_logs, bool skip_hidden, + int external_dir_num) { + FOBJ_FUNC_ARP(); + fio_header hdr; + fio_list_dir_request req; + char *buf = pgut_malloc(CHUNK_SIZE); + + /* Send to the agent message with parameters for directory listing */ + snprintf(req.path, MAXPGPATH, "%s", root); + req.handle_tablespaces = handle_tablespaces; + req.follow_symlink = follow_symlink; + req.backup_logs = backup_logs; + req.skip_hidden = skip_hidden; + req.external_dir_num = external_dir_num; + + hdr.cop = FIO_LIST_DIR; + hdr.size = sizeof(req); + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, &req, hdr.size), hdr.size); + + for (;;) { + /* receive data */ + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + + if (hdr.cop == FIO_SEND_FILE_EOF) { + /* the work is done */ + break; + } else if (hdr.cop == FIO_SEND_FILE) { + pgFile *file = NULL; + fio_pgFile fio_file; + + /* receive rel_path */ + IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); + file = pgFileInit(buf); + + /* receive metainformation */ + IO_CHECK(fio_read_all(fio_stdin, &fio_file, sizeof(fio_file)), sizeof(fio_file)); + + file->mode = fio_file.mode; + file->size = fio_file.size; + file->mtime = fio_file.mtime; + file->is_datafile = fio_file.is_datafile; + file->tblspcOid = fio_file.tblspcOid; + file->dbOid = fio_file.dbOid; + file->relOid = fio_file.relOid; + file->forkName = fio_file.forkName; + 
file->segno = fio_file.segno; + file->external_dir_num = fio_file.external_dir_num; + + if (fio_file.linked_len > 0) { + IO_CHECK(fio_read_all(fio_stdin, buf, fio_file.linked_len), fio_file.linked_len); + + file->linked = pgut_malloc(fio_file.linked_len); + snprintf(file->linked, fio_file.linked_len, "%s", buf); + } + +// elog(INFO, "Received file: %s, mode: %u, size: %lu, mtime: %lu", +// file->rel_path, file->mode, file->size, file->mtime); + + parray_append(files, file); + } else { + /* TODO: fio_disconnect may get assert fail when running after this */ + elog(ERROR, "Remote agent returned message of unexpected type: %i", hdr.cop); + } + } + + pg_free(buf); +} + +static void +pioRemoteDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { + FOBJ_FUNC_ARP(); + fio_header hdr; + fio_remove_dir_request req; + + /* Send to the agent message with parameters for directory listing */ + snprintf(req.path, MAXPGPATH, "%s", root); + req.root_as_well = root_as_well; + + hdr.cop = FIO_REMOVE_DIR; + hdr.size = sizeof(req); + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, &req, hdr.size), hdr.size); + + /* get the response */ + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + Assert(hdr.cop == FIO_REMOVE_DIR); + + if (hdr.arg != 0) + elog(ERROR, "couldn't remove remote dir"); +} + /* REMOTE FILE */ static err_i diff --git a/src/utils/file.h b/src/utils/file.h index 4e02d7cc8..0523bcd74 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -60,6 +60,7 @@ typedef enum FIO_DISCONNECT, FIO_DISCONNECTED, FIO_LIST_DIR, + FIO_REMOVE_DIR, FIO_CHECK_POSTMASTER, FIO_GET_ASYNC_ERROR, FIO_WRITE_ASYNC, @@ -178,8 +179,9 @@ extern bool fio_is_same_file(fio_location location, const char* filename1, co extern ssize_t fio_readlink(fio_location location, const char *path, char *value, size_t valsiz); extern pid_t fio_check_postmaster(fio_location location, const char *pgdata); -extern void 
fio_list_dir(parray *files, const char *root, bool exclude, bool follow_symlink, - bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num); +extern void db_list_dir(parray *files, const char *root, bool handle_tablespaces, + bool backup_logs, int external_dir_num); +extern void backup_list_dir(parray *files, const char *root); struct PageState; /* defined in pg_probackup.h */ extern struct PageState *fio_get_checksum_map(fio_location location, const char *fullpath, uint32 checksum_version, @@ -241,6 +243,10 @@ typedef struct stat stat_t; #define mth__pioGetCRC32 pg_crc32, (path_t, path), (bool, compressed), \ (err_i *, err) #define mth__pioIsRemote bool +#define mth__pioListDir void, (parray *, files), (const char *, root), \ + (bool, handle_tablespaces), (bool, symlink_and_hidden), \ + (bool, backup_logs), (bool, skip_hidden), (int, external_dir_num) +#define mth__pioRemoveDir void, (const char *, root), (bool, root_as_well) fobj_method(pioOpen); fobj_method(pioStat); @@ -249,9 +255,11 @@ fobj_method(pioRename); fobj_method(pioExists); fobj_method(pioIsRemote); fobj_method(pioGetCRC32); +fobj_method(pioListDir); +fobj_method(pioRemoveDir); #define iface__pioDrive mth(pioOpen, pioStat, pioRemove, pioRename), \ - mth(pioExists, pioGetCRC32, pioIsRemote) + mth(pioExists, pioGetCRC32, pioIsRemote, pioListDir, pioRemoveDir) fobj_iface(pioDrive); #define kls__pioLocalDrive iface__pioDrive, iface(pioDrive) diff --git a/src/utils/parray.c b/src/utils/parray.c index 792e26907..0603c10c4 100644 --- a/src/utils/parray.c +++ b/src/utils/parray.c @@ -217,3 +217,31 @@ bool parray_contains(parray *array, void *elem) } return false; } + +/* effectively remove elements that satisfy certain criterion */ +void +parray_remove_if(parray *array, criterion_fn criterion, void *args, cleanup_fn clean) { + int i = 0; + int j = 0; + + /* removing certain elements */ + while(j < parray_num(array)) { + void *value = array->data[j]; + // if the value satisfies the criterion, 
clean it up + if(criterion(value, args)) { + clean(value); + j++; + continue; + } + + if(i != j) + array->data[i] = array->data[j]; + + i++; + j++; + } + + /* adjust the number of used elements */ + array->used -= j - i; +} + diff --git a/src/utils/parray.h b/src/utils/parray.h index e92ad728c..08846f252 100644 --- a/src/utils/parray.h +++ b/src/utils/parray.h @@ -16,6 +16,9 @@ */ typedef struct parray parray; +typedef bool (*criterion_fn)(void *value, void *args); +typedef void (*cleanup_fn)(void *ref); + extern parray *parray_new(void); extern void parray_expand(parray *array, size_t newnum); extern void parray_free(parray *array); @@ -32,6 +35,7 @@ extern void *parray_bsearch(parray *array, const void *key, int(*compare)(const extern int parray_bsearch_index(parray *array, const void *key, int(*compare)(const void *, const void *)); extern void parray_walk(parray *array, void (*action)(void *)); extern bool parray_contains(parray *array, void *elem); +extern void parray_remove_if(parray *array, criterion_fn criterion, void *args, cleanup_fn clean); #endif /* PARRAY_H */ From 53a3e1bb557d204907314418699dd1d36d3de4a4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 2 Nov 2022 23:36:10 +0300 Subject: [PATCH 065/339] stub for s3 integration --- Makefile | 16 ++++++++ src/backup.c | 3 +- src/pg_probackup.c | 91 +++++++++++++++++++++++++++++++++++++++++++++- src/pg_probackup.h | 19 ++++++++++ src/utils/file.c | 14 +++++++ src/utils/file.h | 3 +- 6 files changed, 141 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 54fd6cb10..0300e1032 100644 --- a/Makefile +++ b/Makefile @@ -76,6 +76,19 @@ include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif +ifndef S3_DIR + ifneq ("$(wildcard $(abspath $(top_pbk_srcdir))/../s3)", "") + S3_DIR = $(abspath $(CURDIR))/../s3 + endif +endif + +ifdef S3_DIR + LDFLAGS += -lcurl + CFLAGS += $(shell pkg-config --cflags libxml-2.0) -DPBCKP_S3=1 + LDFLAGS += $(shell 
pkg-config --libs libxml-2.0) + OBJS += $(S3_DIR)/s3.o +endif + # PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -I$(top_pbk_srcdir)src -I$(BORROW_DIR) PG_CPPFLAGS += -I$(top_pbk_srcdir)src/fu_util -Wno-declaration-after-statement @@ -87,6 +100,9 @@ PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} # additional dependencies on borrowed files src/backup.o src/catchup.o src/pg_probackup.o: $(BORROW_DIR)/streamutil.h +ifdef S3_DIR + src/backup.o src/catchup.o src/pg_probackup.o: $(S3_DIR)/s3.o +endif src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o $(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h $(BORROW_DIR)/receivelog.h: $(BORROW_DIR)/walmethods.h diff --git a/src/backup.c b/src/backup.c index 5f8a2eecd..6d3b28837 100644 --- a/src/backup.c +++ b/src/backup.c @@ -46,7 +46,6 @@ static void do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, static void pg_switch_wal(PGconn *conn); -static void pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startbackup_conn, PGNodeInfo *nodeInfo); static void check_external_for_tablespaces(parray *external_list, PGconn *backup_conn); @@ -1827,7 +1826,7 @@ pg_stop_backup_write_file_helper(const char *path, const char *filename, const c /* * Notify end of backup to PostgreSQL server. 
*/ -static void +void pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startbackup_conn, PGNodeInfo *nodeInfo) { diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 3b008deb0..14c92f95e 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -164,6 +164,12 @@ int64 ttl = -1; static char *expire_time_string = NULL; static pgSetBackupParams *set_backup_params = NULL; +#ifdef PBCKP_S3 +/* S3 options */ +S3_protocol s3_protocol; +char* s3_target_bucket = NULL; +#endif + /* ================ backupState =========== */ static char *backup_id_string = NULL; pgBackup current; @@ -174,6 +180,9 @@ static bool help_opt = false; static void opt_incr_restore_mode(ConfigOption *opt, const char *arg); static void opt_backup_mode(ConfigOption *opt, const char *arg); static void opt_show_format(ConfigOption *opt, const char *arg); +#ifdef PBCKP_S3 +static void opt_s3_protocol(ConfigOption *opt, const char *arg); +#endif static void compress_init(ProbackupSubcmd const subcmd); @@ -270,6 +279,12 @@ static ConfigOption cmd_options[] = { 'I', 170, "ttl", &ttl, SOURCE_CMD_STRICT, SOURCE_DEFAULT, 0, OPTION_UNIT_S, option_get_value}, { 's', 171, "expire-time", &expire_time_string, SOURCE_CMD_STRICT }, +#ifdef PBCKP_S3 + /* S3 options */ + { 'f', 245, "s3", opt_s3_protocol, SOURCE_CMD_STRICT }, + { 's', 246, "target-bucket", &s3_target_bucket, SOURCE_CMD_STRICT }, +#endif + /* options for backward compatibility * TODO: remove in 3.0.0 */ @@ -952,6 +967,19 @@ main(int argc, char *argv[]) compress_init(backup_subcmd); +#ifdef PBCKP_S3 + if (s3_protocol != S3_INVALID_PROTOCOL) + { + char* s3_config_file=""; + read_s3_config(s3_config_file); + } + else + { + if (s3_target_bucket != NULL) + elog(WARNING, "You cannot specify s3-target without using --s3 option with name of protocol"); + } +#endif + /* do actual operation */ switch (backup_subcmd) { @@ -964,11 +992,27 @@ main(int argc, char *argv[]) wal_file_path, wal_file_name, batch_size, !no_validate_wal); 
break; case ADD_INSTANCE_CMD: - return do_add_instance(instanceState, &instance_config); + { + int err = 0; + err = do_add_instance(instanceState, &instance_config); +#ifdef PBCKP_S3 + if (err == 0 && s3_protocol != S3_INVALID_PROTOCOL) + err = do_S3_write_config(&instance_config); +#endif + return err; + } case DELETE_INSTANCE_CMD: return do_delete_instance(instanceState); case INIT_CMD: - return do_init(catalogState); + { + int err = 0; + err = do_init(catalogState); +#ifdef PBCKP_S3 + if (err == 0 && s3_protocol != S3_INVALID_PROTOCOL) + err = S3_pre_start_check(config); +#endif + return err; + } case BACKUP_CMD: { current.stream = stream_wal; @@ -983,6 +1027,10 @@ main(int argc, char *argv[]) elog(ERROR, "required parameter not specified: BACKUP_MODE " "(-b, --backup-mode)"); +#ifdef PBCKP_S3 + if (s3_protocol != S3_INVALID_PROTOCOL) + return do_S3_backup(instanceState, set_backup_params, start_time); +#endif return do_backup(instanceState, set_backup_params, no_validate, no_sync, backup_logs, start_time); } @@ -990,6 +1038,10 @@ main(int argc, char *argv[]) return do_catchup(catchup_source_pgdata, catchup_destination_pgdata, num_threads, !no_sync, exclude_absolute_paths_list, exclude_relative_paths_list); case RESTORE_CMD: +#ifdef PBCKP_S3 + if (s3_protocol != S3_INVALID_PROTOCOL) + return do_S3_restore(instanceState, current.backup_id); +#endif return do_restore_or_validate(instanceState, current.backup_id, recovery_target_options, restore_params, no_sync); @@ -1009,6 +1061,10 @@ main(int argc, char *argv[]) restore_params, no_sync); case SHOW_CMD: +#ifdef PBCKP_S3 + if (s3_protocol != S3_INVALID_PROTOCOL) + return do_S3_show(instanceState); +#endif return do_show(catalogState, instanceState, current.backup_id, show_archive); case DELETE_CMD: @@ -1197,3 +1253,34 @@ opt_exclude_path(ConfigOption *opt, const char *arg) else opt_parser_add_to_parray_helper(&exclude_relative_paths_list, arg); } + +#ifdef PBCKP_S3 +static S3_protocol +parse_s3_protocol(const char 
*value) +{ + const char *v = value; + size_t len; + + /* Skip all spaces detected */ + while (IsSpace(*v)) + v++; + len = strlen(v); + + if (len > 0 && pg_strncasecmp("MINIO", v, len) == 0) + return S3_MINIO_PROTOCOL; + if (len > 0 && pg_strncasecmp("AWS", v, len) == 0) + return S3_AWS_PROTOCOL; + else if (len > 0 && pg_strncasecmp("GOOGLE", v, len) == 0) + return S3_GOOGLE_PROTOCOL; + else if (len > 0 && pg_strncasecmp("VK", v, len) == 0) + return S3_VK_PROTOCOL; + else + return S3_INVALID_PROTOCOL; +} + +static void +opt_s3_protocol(ConfigOption *opt, const char *arg) +{ + s3_protocol = parse_s3_protocol(arg); +} +#endif diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 711a60e82..ecea40123 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -43,6 +43,10 @@ #include "pg_probackup_state.h" +#ifdef PGPRO_S3 +#include "../s3/s3.h" +#endif + #if defined(WIN32) && !(defined(_UCRT) && defined(__MINGW64__)) #error Windows port requires compilation in MinGW64 UCRT environment #endif @@ -800,6 +804,11 @@ extern pgBackup current; /* argv of the process */ extern char** commands_args; +#ifdef PBCKP_S3 +/* S3 options */ +extern S3_protocol s3_protocol; +#endif + /* in backup.c */ extern int do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, bool no_validate, bool no_sync, bool backup_logs, time_t start_time); @@ -810,6 +819,15 @@ extern const char *deparse_backup_mode(BackupMode mode); extern void process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno); +#ifdef PBCKP_S3 +/* in s3.c */ +extern int do_S3_backup(InstanceState *instanceState, + pgSetBackupParams *set_backup_params,time_t start_time); +extern int do_S3_show(InstanceState *instanceState); +extern int do_S3_restore(InstanceState *instanceState, time_t target_backup_id); +extern int do_S3_write_config(InstanceConfig *instance); +#endif + /* in catchup.c */ extern int do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool 
sync_dest_files, parray *exclude_absolute_paths_list, parray *exclude_relative_paths_list); @@ -1248,6 +1266,7 @@ extern parray *backup_files_list; extern void pg_start_backup(const char *label, bool smooth, pgBackup *backup, PGNodeInfo *nodeInfo, PGconn *conn); +extern void pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startbackup_conn, PGNodeInfo *nodeInfo); extern void pg_silent_client_messages(PGconn *conn); extern void pg_create_restore_point(PGconn *conn, time_t backup_start_time); extern void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, char **query_text); diff --git a/src/utils/file.c b/src/utils/file.c index 82df5266c..203895daf 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -8,6 +8,10 @@ #include "file.h" #include "storage/checksum.h" +#ifdef PBCKP_S3 +#include "../s3/s3.h" +#endif + #define PRINTF_BUF_SIZE 1024 static __thread unsigned long fio_fdset = 0; @@ -3417,6 +3421,13 @@ static pioDrive_i remoteDrive; pioDrive_i pioDriveForLocation(fio_location loc) { + if (loc == FIO_CLOUD_HOST) +#ifdef PBCKP_S3 + return cloudDrive; +#else + elog(ERROR, "NO CLOUD DRIVE YET"); +#endif + if (fio_is_remote(loc)) return remoteDrive; else @@ -4915,4 +4926,7 @@ init_pio_objects(void) localDrive = bindref_pioDrive($alloc(pioLocalDrive)); remoteDrive = bindref_pioDrive($alloc(pioRemoteDrive)); +#ifdef PBCKP_S3 + create_pioCloudeDrive(); +#endif } diff --git a/src/utils/file.h b/src/utils/file.h index 0523bcd74..0f7c79bc1 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -88,7 +88,8 @@ typedef enum FIO_LOCAL_HOST, /* data is locate at local host */ FIO_DB_HOST, /* data is located at Postgres server host */ FIO_BACKUP_HOST, /* data is located at backup host */ - FIO_REMOTE_HOST /* date is located at remote host */ + FIO_REMOTE_HOST, /* data is located at remote host */ + FIO_CLOUD_HOST /* data is located at cloud (S3) */ } fio_location; extern fio_location MyLocation; From 
5851068e2bca0c9eb0b6c9698d3d9a11ff710e23 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 3 Nov 2022 02:37:29 +0300 Subject: [PATCH 066/339] fix set_forkname Fork detection were broken before set_forkname extraction, and its bug were copied into. Lets reimplement it to be like `parse_filename_for_nonetemp_relation` in PostgreSQL code. --- src/catalog.c | 11 ++++++ src/dir.c | 94 +++++++++++++++++++++++++++++----------------- src/pg_probackup.h | 2 +- 3 files changed, 72 insertions(+), 35 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 3c0396e1c..f0f34d9a0 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1139,7 +1139,18 @@ get_backup_filelist(pgBackup *backup, bool strict) file->hdr_size = (int) hdr_size; if (file->external_dir_num == 0) + { + bool is_datafile = file->is_datafile; set_forkname(file); + if (is_datafile != file->is_datafile) + { + elog(WARNING, "File '%s' was stored as datafile, but looks like it is not", + file->rel_path); + /* Lets fail in tests */ + Assert(file->is_datafile == file->is_datafile); + file->is_datafile = is_datafile; + } + } parray_append(files, file); } diff --git a/src/dir.c b/src/dir.c index 0c793753c..b2258d39b 100644 --- a/src/dir.c +++ b/src/dir.c @@ -688,20 +688,6 @@ dir_check_file(pgFile *file, bool backup_logs) if (file->forkName == ptrack) /* Compatibility with left-overs from ptrack1 */ return CHECK_FALSE; - else if (file->forkName != none) - return CHECK_TRUE; - - /* Set is_datafile flag */ - { - char suffix[MAXFNAMELEN]; - - /* check if file is datafile */ - sscanf_res = sscanf(file->name, "%u.%d.%s", &(file->relOid), - &(file->segno), suffix); - Assert(sscanf_res > 0); /* since first char is digit */ - if (sscanf_res == 1 || sscanf_res == 2) - file->is_datafile = true; - } } } @@ -1927,34 +1913,74 @@ pfilearray_clear_locks(parray *file_list) } } +static inline bool +is_forkname(char *name, size_t *pos, const char *forkname) +{ + size_t fnlen = strlen(forkname); + if (strncmp(name + *pos, forkname, 
fnlen) != 0) + return false; + *pos += fnlen; + return true; +} + +#define OIDCHARS 10 + /* Set forkName if possible */ -void +bool set_forkname(pgFile *file) { - int name_len = strlen(file->name); - - /* Auxiliary fork of the relfile */ - if (name_len > 3 && strcmp(file->name + name_len - 3, "_vm") == 0) - file->forkName = vm; + size_t i = 0; + uint64_t oid = 0; /* use 64bit to not check for overflow in a loop */ - else if (name_len > 4 && strcmp(file->name + name_len - 4, "_fsm") == 0) - file->forkName = fsm; + /* pretend it is not relation file */ + file->relOid = 0; + file->forkName = none; + file->is_datafile = false; - else if (name_len > 4 && strcmp(file->name + name_len - 4, ".cfm") == 0) - file->forkName = cfm; + for (i = 0; isdigit(file->name[i]); i++) + { + if (i == 0 && file->name[i] == '0') + return false; + oid = oid * 10 + file->name[i] - '0'; + } + if (i == 0 || i > OIDCHARS || oid > UINT32_MAX) + return false; - else if (name_len > 5 && strcmp(file->name + name_len - 5, "_init") == 0) + /* usual fork name */ + /* /^\d+_(vm|fsm|init|ptrack)$/ */ + if (is_forkname(file->name, &i, "_vm")) + file->forkName = vm; + else if (is_forkname(file->name, &i, "_fsm")) + file->forkName = fsm; + else if (is_forkname(file->name, &i, "_init")) file->forkName = init; - - else if (name_len > 7 && strcmp(file->name + name_len - 7, "_ptrack") == 0) + else if (is_forkname(file->name, &i, "_ptrack")) file->forkName = ptrack; - // extract relOid for certain forks + /* segment number */ + /* /^\d+(_(vm|fsm|init|ptrack))?\.\d+$/ */ + if (file->name[i] == '.' 
&& isdigit(file->name[i+1])) + { + for (i++; isdigit(file->name[i]); i++) + ; + } + + /* CFS "fork name" */ + if (file->forkName == none && + is_forkname(file->name, &i, ".cfm")) + { + /* /^\d+(\.\d+)?.cfm$/ */ + file->forkName = cfm; + } + + /* If there are excess characters, it is not relation file */ + if (file->name[i] != 0) + { + file->forkName = none; + return false; + } - if ((file->forkName == vm || - file->forkName == fsm || - file->forkName == init || - file->forkName == cfm) && - (sscanf(file->name, "%u*", &(file->relOid)) != 1)) - file->relOid = 0; + file->relOid = oid; + file->is_datafile = file->forkName == none; + return true; } diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 711a60e82..eff9664c8 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1041,7 +1041,7 @@ extern int pgCompareString(const void *str1, const void *str2); extern int pgPrefixCompareString(const void *str1, const void *str2); extern int pgCompareOid(const void *f1, const void *f2); extern void pfilearray_clear_locks(parray *file_list); -extern void set_forkname(pgFile *file); +extern bool set_forkname(pgFile *file); extern void exclude_files(parray *files, bool backup_logs); /* in data.c */ From 2f2d8790aca2c5da4522b138c61df5ab4faf740c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 3 Nov 2022 03:21:27 +0300 Subject: [PATCH 067/339] fix for forkname detection in get_backup_filelist --- src/catalog.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index f0f34d9a0..a37c067a1 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1138,14 +1138,18 @@ get_backup_filelist(pgBackup *backup, bool strict) if (get_control_value_int64(buf, "hdr_size", &hdr_size, false)) file->hdr_size = (int) hdr_size; - if (file->external_dir_num == 0) + if (file->external_dir_num == 0 && S_ISREG(file->mode)) { bool is_datafile = file->is_datafile; set_forkname(file); if (is_datafile != file->is_datafile) { - elog(WARNING, 
"File '%s' was stored as datafile, but looks like it is not", - file->rel_path); + if (is_datafile) + elog(WARNING, "File '%s' was stored as datafile, but looks like it is not", + file->rel_path); + else + elog(WARNING, "File '%s' was stored as non-datafile, but looks like it is", + file->rel_path); /* Lets fail in tests */ Assert(file->is_datafile == file->is_datafile); file->is_datafile = is_datafile; From f755d54300e84b6fb3dc3c2a9d91fda730241a23 Mon Sep 17 00:00:00 2001 From: Sofia Kopikova Date: Mon, 7 Nov 2022 15:08:51 +0300 Subject: [PATCH 068/339] Fix wrong ifdef --- src/pg_probackup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index ecea40123..0c538b8b3 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -43,7 +43,7 @@ #include "pg_probackup_state.h" -#ifdef PGPRO_S3 +#ifdef PBCKP_S3 #include "../s3/s3.h" #endif From d25a594df523163793b726ae9b096c2dbe3cface Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 3 Nov 2022 02:37:58 +0300 Subject: [PATCH 069/339] [PBCKP-235] fix one test for <15.0 --- tests/backup.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/backup.py b/tests/backup.py index 4f447c9bd..6028a3ff6 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -3441,10 +3441,15 @@ def test_backup_atexit(self): self.assertIn( 'WARNING: backup in progress, stop backup', log_content) - - self.assertIn( - 'FROM pg_catalog.pg_backup_stop', - log_content) + + if self.get_version(node) < 150000: + self.assertIn( + 'FROM pg_catalog.pg_stop_backup', + log_content) + else: + self.assertIn( + 'FROM pg_catalog.pg_backup_stop', + log_content) self.assertIn( 'setting its status to ERROR', From ca9553dae24a8782615b7528f1a59d972319cbbf Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 8 Nov 2022 02:58:35 +0300 Subject: [PATCH 070/339] don't redefine _(s) since we don't include libpq-int.h anymore --- src/pg_probackup.h | 6 ------ 1 file changed, 6 
deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 9a5934545..08fcaa09c 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -47,12 +47,6 @@ #error Windows port requires compilation in MinGW64 UCRT environment #endif -#if PG_VERSION_NUM >= 150000 -// _() is explicitly undefined in libpq-int.h -// https://github.com/postgres/postgres/commit/28ec316787674dd74d00b296724a009b6edc2fb0 -#define _(s) gettext(s) -#endif - /* Wrap the code that we're going to delete after refactoring in this define*/ #define REFACTORE_ME From cd388dc25611e19e983e72a5ca89710f138ab3b5 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 8 Nov 2022 20:38:42 +0300 Subject: [PATCH 071/339] move pio klass definitions to file.c --- src/utils/file.c | 19 +++++++++++++++++-- src/utils/file.h | 23 ++--------------------- 2 files changed, 19 insertions(+), 23 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 8c708ae2f..14f32fe28 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3924,13 +3924,18 @@ fio_communicate(int in, int out) } // CLASSES + typedef struct pioLocalDrive { } pioLocalDrive; +#define kls__pioLocalDrive iface__pioDrive, iface(pioDrive) +fobj_klass(pioLocalDrive); typedef struct pioRemoteDrive { } pioRemoteDrive; +#define kls__pioRemoteDrive iface__pioDrive, iface(pioDrive) +fobj_klass(pioRemoteDrive); typedef struct pioFile { @@ -3938,12 +3943,16 @@ typedef struct pioFile int flags; bool closed; } pioFile; +#define kls__pioFile mth(fobjDispose) +fobj_klass(pioFile); typedef struct pioLocalFile { pioFile p; int fd; } pioLocalFile; +#define kls__pioLocalFile iface__pioFile, iface(pioFile) +fobj_klass(pioLocalFile); typedef struct pioRemoteFile { @@ -3957,6 +3966,9 @@ typedef struct pioRemoteFile void* asyncChunk; ft_bytes_t chunkRest; } pioRemoteFile; +#define kls__pioRemoteFile iface__pioFile, iface(pioFile), \ + mth(pioSetAsync, pioAsyncRead, pioAsyncWrite, pioAsyncError) +fobj_klass(pioRemoteFile); typedef struct pioReadFilter { 
pioRead_i wrapped; @@ -3967,6 +3979,8 @@ typedef struct pioReadFilter { bool eof; bool finished; } pioReadFilter; +#define kls__pioReadFilter mth(pioRead, pioClose) +fobj_klass(pioReadFilter); typedef struct pioWriteFilter { pioWriteFlush_i wrapped; @@ -3975,6 +3989,9 @@ typedef struct pioWriteFilter { size_t capa; bool finished; } pioWriteFilter; +#define kls__pioWriteFilter iface__pioWriteFlush, iface(pioWriteFlush), \ + mth(pioClose) +fobj_klass(pioWriteFilter); #ifdef HAVE_LIBZ typedef struct pioGZCompress { @@ -4008,8 +4025,6 @@ pioDriveForLocation(fio_location loc) } /* Base physical file type */ -#define kls__pioFile mth(fobjDispose) -fobj_klass(pioFile); static void pioFile_fobjDispose(VSelf) diff --git a/src/utils/file.h b/src/utils/file.h index 61933cda6..245bbd6fb 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -270,21 +270,12 @@ fobj_method(pioListDir); fobj_method(pioRemoveDir); #define iface__pioDrive mth(pioOpen, pioStat, pioRemove, pioRename), \ - mth(pioExists, pioGetCRC32, pioIsRemote, pioListDir, pioRemoveDir) + mth(pioExists, pioGetCRC32, pioIsRemote), \ + mth(pioListDir, pioRemoveDir) fobj_iface(pioDrive); -#define kls__pioLocalDrive iface__pioDrive, iface(pioDrive) -#define kls__pioRemoteDrive iface__pioDrive, iface(pioDrive) -fobj_klass(pioLocalDrive); -fobj_klass(pioRemoteDrive); - extern pioDrive_i pioDriveForLocation(fio_location location); -#define pioFile__common_methods mth(pioRead, pioWrite, pioFlush, pioTruncate, pioClose) - -#define kls__pioLocalFile iface__pioFile, iface(pioFile) -fobj_klass(pioLocalFile); - #define mth__pioSetAsync err_i, (bool, async) #define mth__pioSetAsync__optional() (async, true) #define mth__pioAsyncRead size_t, (ft_bytes_t, buf), (err_i*, err) @@ -295,10 +286,6 @@ fobj_method(pioAsyncRead); fobj_method(pioAsyncWrite); fobj_method(pioAsyncError); -#define kls__pioRemoteFile iface__pioFile, iface(pioFile), \ - mth(pioSetAsync, pioAsyncRead, pioAsyncWrite, pioAsyncError) -fobj_klass(pioRemoteFile); - 
// Filter typedef struct pioTransformResult { size_t consumed; @@ -315,12 +302,6 @@ fobj_method(pioFinish); #define iface__pioFilter mth(pioTransform, pioFinish) fobj_iface(pioFilter); -#define kls__pioReadFilter mth(pioRead, pioClose) -#define kls__pioWriteFilter iface__pioWriteFlush, iface(pioWriteFlush), \ - mth(pioClose) -fobj_klass(pioReadFilter); -fobj_klass(pioWriteFilter); - extern pioWriteFlush_i pioWrapWriteFilter(pioWriteFlush_i fl, pioFilter_i flt, size_t buf_size); From 399b430a78f60d62b96fae8f5742a7b7aea195fc Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 9 Nov 2022 14:49:08 +0300 Subject: [PATCH 072/339] refactor: rename filter methods --- src/utils/file.c | 32 ++++++++++++++++---------------- src/utils/file.h | 14 +++++++------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 14f32fe28..ff71c986b 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -4976,7 +4976,7 @@ pioReadFilter_pioRead(VSelf, ft_bytes_t wbuf, err_i *err) { Self(pioReadFilter); fobj_reset_err(err); - pioTransformResult tr; + pioFltTransformResult tr; size_t wlen = wbuf.len; ft_bytes_t rbuf; size_t r; @@ -4990,7 +4990,7 @@ pioReadFilter_pioRead(VSelf, ft_bytes_t wbuf, err_i *err) rbuf = ft_bytes(self->buffer, self->len); while (rbuf.len > 0) { - tr = $i(pioTransform, self->filter, rbuf, wbuf, err); + tr = $i(pioFltTransform, self->filter, rbuf, wbuf, err); if ($haserr(*err)) return wlen - wbuf.len; ft_bytes_consume(&rbuf, tr.consumed); @@ -5022,7 +5022,7 @@ pioReadFilter_pioRead(VSelf, ft_bytes_t wbuf, err_i *err) while (wbuf.len > 0 && self->eof) { - r = $i(pioFinish, self->filter, wbuf, err); + r = $i(pioFltFinish, self->filter, wbuf, err); if ($haserr(*err)) return (ssize_t)(wlen - wbuf.len); ft_bytes_consume(&wbuf, r); @@ -5046,7 +5046,7 @@ pioReadFilter_pioClose(VSelf, bool sync) if (!self->finished) { - r = $i(pioFinish, self->filter, ft_bytes(NULL, 0), &err); + r = $i(pioFltFinish, self->filter, 
ft_bytes(NULL, 0), &err); ft_assert(r == 0); } if ($ifdef(errcl =, pioClose, self->wrapped.self, sync)) @@ -5092,7 +5092,7 @@ pioWriteFilter_pioWrite(VSelf, ft_bytes_t rbuf, err_i *err) { Self(pioWriteFilter); fobj_reset_err(err); - pioTransformResult tr; + pioFltTransformResult tr; size_t rlen = rbuf.len; ft_bytes_t wbuf; size_t r; @@ -5102,7 +5102,7 @@ pioWriteFilter_pioWrite(VSelf, ft_bytes_t rbuf, err_i *err) wbuf = ft_bytes(self->buffer, self->capa); while (wbuf.len > 0) { - tr = $i(pioTransform, self->filter, rbuf, wbuf, err); + tr = $i(pioFltTransform, self->filter, rbuf, wbuf, err); if ($haserr(*err)) return rlen - rbuf.len; ft_bytes_consume(&rbuf, tr.consumed); @@ -5146,7 +5146,7 @@ pioWriteFilter_pioFlush(VSelf) wbuf = ft_bytes(self->buffer, self->capa); while (wbuf.len > 0) { - r = $i(pioFinish, self->filter, wbuf, &err); + r = $i(pioFltFinish, self->filter, wbuf, &err); if ($haserr(err)) return err; ft_bytes_consume(&wbuf, r); @@ -5181,7 +5181,7 @@ pioWriteFilter_pioClose(VSelf, bool sync) if (!self->finished) { - r = $i(pioFinish, self->filter, ft_bytes(NULL, 0), &err); + r = $i(pioFltFinish, self->filter, ft_bytes(NULL, 0), &err); ft_assert(r == 0); } if ($ifdef(errcl =, pioClose, self->wrapped.self, sync)) @@ -5249,11 +5249,11 @@ pioGZDecompressFilter(bool ignoreTruncate) return bind_pioFilter(gz); } -static pioTransformResult -pioGZCompress_pioTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i *err) +static pioFltTransformResult +pioGZCompress_pioFltTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i *err) { Self(pioGZCompress); - pioTransformResult tr = {0, 0}; + pioFltTransformResult tr = {0, 0}; size_t rlen = rbuf.len; size_t wlen = wbuf.len; ssize_t rc; @@ -5285,7 +5285,7 @@ pioGZCompress_pioTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i *err) } static size_t -pioGZCompress_pioFinish(VSelf, ft_bytes_t wbuf, err_i *err) +pioGZCompress_pioFltFinish(VSelf, ft_bytes_t wbuf, err_i *err) { Self(pioGZCompress); size_t wlen = 
wbuf.len; @@ -5338,11 +5338,11 @@ pioGZCompress_fobjRepr(VSelf) return $S("pioGZCompress"); } -static pioTransformResult -pioGZDecompress_pioTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i* err) +static pioFltTransformResult +pioGZDecompress_pioFltTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i* err) { Self(pioGZDecompress); - pioTransformResult tr = {0, 0}; + pioFltTransformResult tr = {0, 0}; size_t rlen = rbuf.len; size_t wlen = wbuf.len; int rc; @@ -5387,7 +5387,7 @@ pioGZDecompress_pioTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i* err } static size_t -pioGZDecompress_pioFinish(VSelf, ft_bytes_t wbuf, err_i *err) +pioGZDecompress_pioFltFinish(VSelf, ft_bytes_t wbuf, err_i *err) { Self(pioGZDecompress); size_t wlen = wbuf.len; diff --git a/src/utils/file.h b/src/utils/file.h index 245bbd6fb..7e778d151 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -287,19 +287,19 @@ fobj_method(pioAsyncWrite); fobj_method(pioAsyncError); // Filter -typedef struct pioTransformResult { +typedef struct pioFltTransformResult { size_t consumed; size_t produced; -} pioTransformResult; +} pioFltTransformResult; -#define mth__pioTransform pioTransformResult, (ft_bytes_t, in), \ +#define mth__pioFltTransform pioFltTransformResult, (ft_bytes_t, in), \ (ft_bytes_t, out), \ (err_i*, err) -fobj_method(pioTransform); -#define mth__pioFinish size_t, (ft_bytes_t, out), (err_i*, err) -fobj_method(pioFinish); +fobj_method(pioFltTransform); +#define mth__pioFltFinish size_t, (ft_bytes_t, out), (err_i*, err) +fobj_method(pioFltFinish); -#define iface__pioFilter mth(pioTransform, pioFinish) +#define iface__pioFilter mth(pioFltTransform, pioFltFinish) fobj_iface(pioFilter); extern pioWriteFlush_i pioWrapWriteFilter(pioWriteFlush_i fl, From fed7014d461f38fae1319e1181ab0bac93b55b32 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 9 Nov 2022 15:41:57 +0300 Subject: [PATCH 073/339] rename pioFlush -> pioWriteFinish --- src/utils/file.c | 10 +++++----- 
src/utils/file.h | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index ff71c986b..4b5601968 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -4270,7 +4270,7 @@ pioLocalFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) } static err_i -pioLocalFile_pioFlush(VSelf) +pioLocalFile_pioWriteFinish(VSelf) { Self(pioLocalFile); ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); @@ -4816,7 +4816,7 @@ pioRemoteFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) } static err_i -pioRemoteFile_pioFlush(VSelf) +pioRemoteFile_pioWriteFinish(VSelf) { Self(pioRemoteFile); @@ -5134,7 +5134,7 @@ pioWriteFilter_pioWrite(VSelf, ft_bytes_t rbuf, err_i *err) } static err_i -pioWriteFilter_pioFlush(VSelf) +pioWriteFilter_pioWriteFinish(VSelf) { Self(pioWriteFilter); err_i err = $noerr(); @@ -5503,8 +5503,8 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, } } - /* pioFlush will check for async error if destination was remote */ - err = $i(pioFlush, dest); + /* pioWriteFinish will check for async error if destination was remote */ + err = $i(pioWriteFinish, dest); if ($haserr(err)) $ireturn($err(SysErr, "Cannot flush file {path}: {cause}", path($irepr(dest)), cause(err.self))); diff --git a/src/utils/file.h b/src/utils/file.h index 7e778d151..6f4a8f2e1 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -224,16 +224,16 @@ fobj_error_cstr_key(gzErrStr); #define mth__pioRead size_t, (ft_bytes_t, buf), (err_i *, err) #define mth__pioWrite size_t, (ft_bytes_t, buf), (err_i *, err) #define mth__pioTruncate err_i, (size_t, sz) -#define mth__pioFlush err_i +#define mth__pioWriteFinish err_i fobj_method(pioClose); fobj_method(pioRead); fobj_method(pioWrite); fobj_method(pioTruncate); -fobj_method(pioFlush); +fobj_method(pioWriteFinish); -#define iface__pioFile mth(pioWrite, pioFlush, pioRead, pioTruncate, pioClose) -#define iface__pioWriteFlush mth(pioWrite, pioFlush) -#define iface__pioWriteCloser 
mth(pioWrite, pioFlush, pioClose) +#define iface__pioFile mth(pioWrite, pioWriteFinish, pioRead, pioTruncate, pioClose) +#define iface__pioWriteFlush mth(pioWrite, pioWriteFinish) +#define iface__pioWriteCloser mth(pioWrite, pioWriteFinish, pioClose) #define iface__pioReadCloser mth(pioRead, pioClose) fobj_iface(pioFile); fobj_iface(pioWriteFlush); From 5f419c38d6a9624cfe109a5fe9afcbc3afa8973c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 10 Nov 2022 15:44:09 +0300 Subject: [PATCH 074/339] remove_temp_wal_file: accept raw interface instead of pointer. --- src/archive.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/archive.c b/src/archive.c index 90c54c15a..bcc0bccbc 100644 --- a/src/archive.c +++ b/src/archive.c @@ -359,9 +359,9 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, } static void -remove_temp_wal_file(pioDrive_i *backup_drive, char *partpath) +remove_temp_wal_file(pioDrive_i backup_drive, char *partpath) { - err_i remerr = $i(pioRemove, *backup_drive, partpath, false); + err_i remerr = $i(pioRemove, backup_drive, partpath, false); if ($haserr(remerr)) elog(WARNING, "Temp WAL: %s", $errmsg(remerr)); } @@ -527,7 +527,7 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, "checksum, skip pushing: \"%s\"", from_fullpath); $i(pioClose, in); $i(pioClose, out); - remove_temp_wal_file(&backup_drive, to_fullpath_part); + remove_temp_wal_file(backup_drive, to_fullpath_part); return 1; } else if (overwrite) @@ -540,7 +540,7 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, { $i(pioClose, in); $i(pioClose, out); - remove_temp_wal_file(&backup_drive, to_fullpath_part); + remove_temp_wal_file(backup_drive, to_fullpath_part); elog(ERROR, "WAL file already exists in archive with " "different checksum: \"%s\"", to_fullpath); @@ -575,14 +575,14 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, if ($haserr(err)) { $i(pioClose, out); - 
remove_temp_wal_file(&backup_drive, to_fullpath_part); + remove_temp_wal_file(backup_drive, to_fullpath_part); elog(ERROR, "Copy WAL: %s", $errmsg(err)); } err = $i(pioClose, out, .sync = !no_sync); if ($haserr(err)) { - remove_temp_wal_file(&backup_drive, to_fullpath_part); + remove_temp_wal_file(backup_drive, to_fullpath_part); elog(ERROR, "Temp WAL: %s", $errmsg(err)); } @@ -590,7 +590,7 @@ err = $i(pioRename, backup_drive, to_fullpath_part, to_fullpath); if ($haserr(err)) { - remove_temp_wal_file(&backup_drive, to_fullpath_part); + remove_temp_wal_file(backup_drive, to_fullpath_part); elog(ERROR, "%s", $errmsg(err)); } From 818ef369fc636fdac0e65588b0c36d32d266fdc8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 11 Nov 2022 21:33:13 +0300 Subject: [PATCH 075/339] grab_excl_lock_file: don't use fio_* This function checks for a concurrent locker via the "kill" command, which is strictly local. There is no way to make it work remotely in a reliable way. Moreover, we would have to write the pid of the remote agent. So, if for whatever reason we need to lock a backup on a remote host, we'd better call this function from the agent. And it would be better to use fcntl(F_SETLK) on Unix and LockFileEx on Windows. But let's leave that for the future.
--- src/catalog.c | 92 +++++++++++++++++++++------------------------------ 1 file changed, 38 insertions(+), 54 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 923943b2c..49f545b72 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -298,12 +298,17 @@ int grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) { char lock_file[MAXPGPATH]; - int fd = 0; + FILE *fp = NULL; char buffer[256]; int ntries = LOCK_TIMEOUT; int empty_tries = LOCK_STALE_TIMEOUT; - int len; - int encoded_pid; + size_t len; + pid_t encoded_pid; + int save_errno = 0; + enum { + GELF_FAILED_WRITE = 1, + GELF_FAILED_CLOSE = 2, + } failed_action = 0; join_path_components(lock_file, root_dir, BACKUP_LOCK_FILE); @@ -314,19 +319,17 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) */ do { - FILE *fp_out = NULL; - if (interrupted) elog(ERROR, "Interrupted while locking backup %s", backup_id); /* - * Try to create the lock file --- O_EXCL makes this atomic. + * Try to create the lock file --- "wx" makes this atomic. * * Think not to make the file protection weaker than 0600. See * comments below. */ - fd = fio_open(FIO_BACKUP_HOST, lock_file, O_RDWR | O_CREAT | O_EXCL); - if (fd >= 0) + fp = fopen(lock_file, "wx"); + if (fp != NULL) break; /* Success; exit the retry loop */ /* read-only fs is a special case */ @@ -342,7 +345,6 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) * If file already exists or we have some permission problem (???), * then retry; */ -// if ((errno != EEXIST && errno != EACCES)) if (errno != EEXIST) elog(ERROR, "Could not create lock file \"%s\": %s", lock_file, strerror(errno)); @@ -352,18 +354,19 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) * here: file might have been deleted since we tried to create it. 
*/ - fp_out = fopen(lock_file, "r"); - if (fp_out == NULL) + fp = fopen(lock_file, "r"); + if (fp == NULL) { if (errno == ENOENT) continue; /* race condition; try again */ elog(ERROR, "Cannot open lock file \"%s\": %s", lock_file, strerror(errno)); } - len = fread(buffer, 1, sizeof(buffer) - 1, fp_out); - if (ferror(fp_out)) + len = fread(buffer, 1, sizeof(buffer) - 1, fp); + if (ferror(fp)) elog(ERROR, "Cannot read from lock file: \"%s\"", lock_file); - fclose(fp_out); + fclose(fp); + fp = NULL; /* * There are several possible reasons for lock file @@ -400,7 +403,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) continue; } - encoded_pid = atoi(buffer); + encoded_pid = (pid_t)atoll(buffer); if (encoded_pid <= 0) { @@ -450,7 +453,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) * it. Need a loop because of possible race condition against other * would-be creators. */ - if (fio_remove(FIO_BACKUP_HOST, lock_file, false) < 0) + if (remove(lock_file) < 0) { if (errno == ENOENT) continue; /* race condition, again */ @@ -461,40 +464,32 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) } while (ntries--); /* Failed to acquire exclusive lock in time */ - if (fd <= 0) + if (fp == NULL) return LOCK_FAIL_TIMEOUT; /* * Successfully created the file, now fill it. */ - snprintf(buffer, sizeof(buffer), "%lld\n", (long long)my_pid); - errno = 0; - if (fio_write(fd, buffer, strlen(buffer)) != strlen(buffer)) - { - int save_errno = errno; + fprintf(fp, "%lld\n", (long long)my_pid); + fflush(fp); - fio_close(fd); - if (fio_remove(FIO_BACKUP_HOST, lock_file, false) != 0) - elog(WARNING, "Cannot remove lock file \"%s\": %s", lock_file, strerror(errno)); - - /* In lax mode if we failed to grab lock because of 'out of space error', - * then treat backup as locked. - * Only delete command should be run in lax mode. 
- */ - if (!strict && save_errno == ENOSPC) - return LOCK_FAIL_ENOSPC; - else - elog(ERROR, "Could not write lock file \"%s\": %s", - lock_file, strerror(save_errno)); + if (ferror(fp)) + { + failed_action = GELF_FAILED_WRITE; + save_errno = errno; + clearerr(fp); } - if (fio_flush(fd) != 0) + if (fclose(fp) && save_errno == 0) { - int save_errno = errno; + failed_action = GELF_FAILED_CLOSE; + save_errno = errno; + } - fio_close(fd); - if (fio_remove(FIO_BACKUP_HOST, lock_file, false) != 0) + if (save_errno) + { + if (remove(lock_file) != 0) elog(WARNING, "Cannot remove lock file \"%s\": %s", lock_file, strerror(errno)); /* In lax mode if we failed to grab lock because of 'out of space error', @@ -503,21 +498,10 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) */ if (!strict && save_errno == ENOSPC) return LOCK_FAIL_ENOSPC; - else - elog(ERROR, "Could not flush lock file \"%s\": %s", - lock_file, strerror(save_errno)); - } - - if (fio_close(fd) != 0) - { - int save_errno = errno; - - if (fio_remove(FIO_BACKUP_HOST, lock_file, false) != 0) - elog(WARNING, "Cannot remove lock file \"%s\": %s", lock_file, strerror(errno)); - - if (!strict && save_errno == ENOSPC) - return LOCK_FAIL_ENOSPC; - else + else if (failed_action == GELF_FAILED_WRITE) + elog(ERROR, "Could not write lock file \"%s\": %s", + lock_file, strerror(save_errno)); + else if (failed_action == GELF_FAILED_CLOSE) elog(ERROR, "Could not close lock file \"%s\": %s", lock_file, strerror(save_errno)); } From f6e7be6d994a6c0047d2be8e22f6101407a445c4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 13 Nov 2022 23:40:32 +0300 Subject: [PATCH 076/339] simplify grabbing and releasing shared lock --- src/catalog.c | 163 ++++++++++++++++++++------------------------------ 1 file changed, 66 insertions(+), 97 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 49f545b72..baf757e0f 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -28,7 +28,7 @@ static bool 
backup_lock_exit_hook_registered = false; static parray *locks = NULL; static int grab_excl_lock_file(const char *backup_dir, const char *backup_id, bool strict); -static int grab_shared_lock_file(pgBackup *backup); +static int grab_shared_lock_file(const char *backup_dir); static int wait_shared_owners(pgBackup *backup); @@ -231,7 +231,7 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) if (exclusive) rc = wait_shared_owners(backup); else - rc = grab_shared_lock_file(backup); + rc = grab_shared_lock_file(backup->root_dir); if (rc != 0) { @@ -600,26 +600,21 @@ wait_shared_owners(pgBackup *backup) return 0; } +#define FT_SLICE pid +#define FT_SLICE_TYPE pid_t +#include + /* - * Lock backup in shared mode - * 0 - successs - * 1 - fail + * returns array of pids stored in shared lock file and still alive. + * It excludes our own pid, so no need to exclude it explicitely. */ -int -grab_shared_lock_file(pgBackup *backup) +static ft_arr_pid_t +read_shared_lock_file(const char *lock_file) { FILE *fp_in = NULL; - FILE *fp_out = NULL; char buf_in[256]; pid_t encoded_pid; - char lock_file[MAXPGPATH]; - - char buffer[8192]; /*TODO: should be enough, but maybe malloc+realloc is better ? */ - char lock_file_tmp[MAXPGPATH]; - int buffer_len = 0; - - join_path_components(lock_file, backup->root_dir, BACKUP_RO_LOCK_FILE); - snprintf(lock_file_tmp, MAXPGPATH, "%s%s", lock_file, "tmp"); + ft_arr_pid_t pids = ft_arr_init(); /* open already existing lock files */ fp_in = fopen(lock_file, "r"); @@ -629,7 +624,7 @@ grab_shared_lock_file(pgBackup *backup) /* read PIDs of owners */ while (fp_in && fgets(buf_in, sizeof(buf_in), fp_in)) { - encoded_pid = atoi(buf_in); + encoded_pid = (pid_t)atoll(buf_in); if (encoded_pid <= 0) { elog(WARNING, "Bogus data in lock file \"%s\": \"%s\"", lock_file, buf_in); @@ -645,11 +640,11 @@ grab_shared_lock_file(pgBackup *backup) * Somebody is still using this backup in shared mode, * copy this pid into a new file. 
*/ - buffer_len += snprintf(buffer+buffer_len, 4096, "%llu\n", (long long)encoded_pid); + ft_arr_pid_push(&pids, encoded_pid); } else if (errno != ESRCH) elog(ERROR, "Failed to send signal 0 to a process %lld: %s", - (long long)encoded_pid, strerror(errno)); + (long long)encoded_pid, strerror(errno)); } if (fp_in) @@ -659,31 +654,69 @@ grab_shared_lock_file(pgBackup *backup) fclose(fp_in); } + return pids; +} + +static void +write_shared_lock_file(const char *lock_file, ft_arr_pid_t pids) +{ + FILE *fp_out = NULL; + char lock_file_tmp[MAXPGPATH]; + ssize_t i; + + snprintf(lock_file_tmp, MAXPGPATH, "%s%s", lock_file, "tmp"); + fp_out = fopen(lock_file_tmp, "w"); if (fp_out == NULL) { if (errno == EROFS) - return 0; + return; elog(ERROR, "Cannot open temp lock file \"%s\": %s", lock_file_tmp, strerror(errno)); } - /* add my own pid */ - buffer_len += snprintf(buffer+buffer_len, sizeof(buffer), "%llu\n", (long long)my_pid); - /* write out the collected PIDs to temp lock file */ - fwrite(buffer, 1, buffer_len, fp_out); + for (i = 0; i < pids.len; i++) + fprintf(fp_out, "%lld\n", (long long)ft_arr_pid_at(&pids, i)); + fflush(fp_out); if (ferror(fp_out)) + { + fclose(fp_out); + remove(lock_file_tmp); elog(ERROR, "Cannot write to lock file: \"%s\"", lock_file_tmp); + } if (fclose(fp_out) != 0) + { + remove(lock_file_tmp); elog(ERROR, "Cannot close temp lock file \"%s\": %s", lock_file_tmp, strerror(errno)); + } if (rename(lock_file_tmp, lock_file) < 0) elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", - lock_file_tmp, lock_file, strerror(errno)); + lock_file_tmp, lock_file, strerror(errno)); +} +/* + * Lock backup in shared mode + * 0 - successs + * 1 - fail + */ +int +grab_shared_lock_file(const char *backup_dir) +{ + char lock_file[MAXPGPATH]; + ft_arr_pid_t pids; + + join_path_components(lock_file, backup_dir, BACKUP_RO_LOCK_FILE); + + pids = read_shared_lock_file(lock_file); + /* add my own pid */ + ft_arr_pid_push(&pids, my_pid); + + 
write_shared_lock_file(lock_file, pids); + ft_arr_pid_free(&pids); return 0; } @@ -723,87 +756,23 @@ release_excl_lock_file(const char *backup_dir) void release_shared_lock_file(const char *backup_dir) { - FILE *fp_in = NULL; - FILE *fp_out = NULL; - char buf_in[256]; - pid_t encoded_pid; char lock_file[MAXPGPATH]; - - char buffer[8192]; /*TODO: should be enough, but maybe malloc+realloc is better ? */ - char lock_file_tmp[MAXPGPATH]; - int buffer_len = 0; + ft_arr_pid_t pids; join_path_components(lock_file, backup_dir, BACKUP_RO_LOCK_FILE); - snprintf(lock_file_tmp, MAXPGPATH, "%s%s", lock_file, "tmp"); - /* open lock file */ - fp_in = fopen(lock_file, "r"); - if (fp_in == NULL) + pids = read_shared_lock_file(lock_file); + /* read_shared_lock_file already had deleted my own pid */ + if (pids.len == 0) { - if (errno == ENOENT) - return; - else - elog(ERROR, "Cannot open lock file \"%s\": %s", lock_file, strerror(errno)); - } - - /* read PIDs of owners */ - while (fgets(buf_in, sizeof(buf_in), fp_in)) - { - encoded_pid = atoi(buf_in); - - if (encoded_pid <= 0) - { - elog(WARNING, "Bogus data in lock file \"%s\": \"%s\"", lock_file, buf_in); - continue; - } - - /* remove my pid */ - if (encoded_pid == my_pid) - continue; - - if (kill(encoded_pid, 0) == 0) - { - /* - * Somebody is still using this backup in shared mode, - * copy this pid into a new file. 
- */ - buffer_len += snprintf(buffer+buffer_len, 4096, "%llu\n", (long long)encoded_pid); - } - else if (errno != ESRCH) - elog(ERROR, "Failed to send signal 0 to a process %lld: %s", - (long long)encoded_pid, strerror(errno)); - } - - if (ferror(fp_in)) - elog(ERROR, "Cannot read from lock file: \"%s\"", lock_file); - fclose(fp_in); - - /* if there is no active pid left, then there is nothing to do */ - if (buffer_len == 0) - { - if (fio_remove(FIO_BACKUP_HOST, lock_file, false) != 0) + ft_arr_pid_free(&pids); + if (remove(lock_file) != 0) elog(ERROR, "Cannot remove shared lock file \"%s\": %s", lock_file, strerror(errno)); return; } - fp_out = fopen(lock_file_tmp, "w"); - if (fp_out == NULL) - elog(ERROR, "Cannot open temp lock file \"%s\": %s", lock_file_tmp, strerror(errno)); - - /* write out the collected PIDs to temp lock file */ - fwrite(buffer, 1, buffer_len, fp_out); - - if (ferror(fp_out)) - elog(ERROR, "Cannot write to lock file: \"%s\"", lock_file_tmp); - - if (fclose(fp_out) != 0) - elog(ERROR, "Cannot close temp lock file \"%s\": %s", lock_file_tmp, strerror(errno)); - - if (rename(lock_file_tmp, lock_file) < 0) - elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", - lock_file_tmp, lock_file, strerror(errno)); - - return; + write_shared_lock_file(lock_file, pids); + ft_arr_pid_free(&pids); } /* From 2da1f31d3c82e95de472baf010ebfae6a42cbdc9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 14 Nov 2022 23:37:41 +0300 Subject: [PATCH 077/339] remove unused fio_gzeof and fio_gzwrite. 
--- src/utils/file.c | 141 ++++++----------------------------------------- src/utils/file.h | 2 - 2 files changed, 18 insertions(+), 125 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 4b5601968..e7c5bdd46 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1621,7 +1621,6 @@ typedef struct fioGZFile z_stream strm; int fd; int errnum; - bool compress; bool eof; Bytef buf[ZLIB_BUFFER_SIZE]; } fioGZFile; @@ -1657,48 +1656,30 @@ gzFile fio_gzopen(fio_location location, const char* path, const char* mode, int level) { int rc; + + if (strchr(mode, 'w') != NULL) /* compress */ + { + Assert(false); + elog(ERROR, "fio_gzopen(\"wb\") is not implemented"); + } + if (fio_is_remote(location)) { fioGZFile* gz = (fioGZFile*) pgut_malloc(sizeof(fioGZFile)); memset(&gz->strm, 0, sizeof(gz->strm)); gz->eof = 0; gz->errnum = Z_OK; - /* check if file opened for writing */ - if (strcmp(mode, PG_BINARY_W) == 0) /* compress */ + gz->strm.next_in = gz->buf; + gz->strm.avail_in = ZLIB_BUFFER_SIZE; + rc = inflateInit2(&gz->strm, 15 + 16); + gz->strm.avail_in = 0; + if (rc == Z_OK) { - gz->strm.next_out = gz->buf; - gz->strm.avail_out = ZLIB_BUFFER_SIZE; - rc = deflateInit2(&gz->strm, - level, - Z_DEFLATED, - MAX_WBITS + 16, DEF_MEM_LEVEL, - Z_DEFAULT_STRATEGY); - if (rc == Z_OK) + gz->fd = fio_open(location, path, O_RDONLY | PG_BINARY); + if (gz->fd < 0) { - gz->compress = 1; - gz->fd = fio_open(location, path, O_WRONLY | O_CREAT | O_EXCL | PG_BINARY); - if (gz->fd < 0) - { - free(gz); - return NULL; - } - } - } - else - { - gz->strm.next_in = gz->buf; - gz->strm.avail_in = ZLIB_BUFFER_SIZE; - rc = inflateInit2(&gz->strm, 15 + 16); - gz->strm.avail_in = 0; - if (rc == Z_OK) - { - gz->compress = 0; - gz->fd = fio_open(location, path, O_RDONLY | PG_BINARY); - if (gz->fd < 0) - { - free(gz); - return NULL; - } + free(gz); + return NULL; } } if (rc != Z_OK) @@ -1711,16 +1692,7 @@ fio_gzopen(fio_location location, const char* path, const char* mode, int level) else { 
gzFile file; - /* check if file opened for writing */ - if (strcmp(mode, PG_BINARY_W) == 0) - { - int fd = open(path, O_WRONLY | O_CREAT | O_EXCL | PG_BINARY, FILE_PERMISSION); - if (fd < 0) - return NULL; - file = gzdopen(fd, mode); - } - else - file = gzopen(path, mode); + file = gzopen(path, mode); if (file != NULL && level != Z_DEFAULT_COMPRESSION) { if (gzsetparams(file, level, Z_DEFAULT_STRATEGY) != Z_OK) @@ -1796,54 +1768,6 @@ fio_gzread(gzFile f, void *buf, unsigned size) } } -int -fio_gzwrite(gzFile f, void const* buf, unsigned size) -{ - if ((size_t)f & FIO_GZ_REMOTE_MARKER) - { - int rc; - fioGZFile* gz = (fioGZFile*)((size_t)f - FIO_GZ_REMOTE_MARKER); - - gz->strm.next_in = (Bytef *)buf; - gz->strm.avail_in = size; - - do - { - if (gz->strm.avail_out == ZLIB_BUFFER_SIZE) /* Compress buffer is empty */ - { - gz->strm.next_out = gz->buf; /* Reset pointer to the beginning of buffer */ - - if (gz->strm.avail_in != 0) /* Has something in input buffer */ - { - rc = deflate(&gz->strm, Z_NO_FLUSH); - Assert(rc == Z_OK); - gz->strm.next_out = gz->buf; /* Reset pointer to the beginning of buffer */ - } - else - { - break; - } - } - rc = fio_write_async(gz->fd, gz->strm.next_out, ZLIB_BUFFER_SIZE - gz->strm.avail_out); - if (rc >= 0) - { - gz->strm.next_out += rc; - gz->strm.avail_out += rc; - } - else - { - return rc; - } - } while (gz->strm.avail_out != ZLIB_BUFFER_SIZE || gz->strm.avail_in != 0); - - return size; - } - else - { - return gzwrite(f, buf, size); - } -} - int fio_gzclose(gzFile f) { @@ -1851,22 +1775,7 @@ fio_gzclose(gzFile f) { fioGZFile* gz = (fioGZFile*)((size_t)f - FIO_GZ_REMOTE_MARKER); int rc; - if (gz->compress) - { - gz->strm.next_out = gz->buf; - rc = deflate(&gz->strm, Z_FINISH); - Assert(rc == Z_STREAM_END && gz->strm.avail_out != ZLIB_BUFFER_SIZE); - deflateEnd(&gz->strm); - rc = fio_write(gz->fd, gz->buf, ZLIB_BUFFER_SIZE - gz->strm.avail_out); - if (rc != ZLIB_BUFFER_SIZE - gz->strm.avail_out) - { - return -1; - } - } - else - { - 
inflateEnd(&gz->strm); - } + inflateEnd(&gz->strm); rc = fio_close(gz->fd); free(gz); return rc; @@ -1877,20 +1786,6 @@ fio_gzclose(gzFile f) } } -int -fio_gzeof(gzFile f) -{ - if ((size_t)f & FIO_GZ_REMOTE_MARKER) - { - fioGZFile* gz = (fioGZFile*)((size_t)f - FIO_GZ_REMOTE_MARKER); - return gz->eof; - } - else - { - return gzeof(f); - } -} - const char* fio_gzerror(gzFile f, int *errnum) { diff --git a/src/utils/file.h b/src/utils/file.h index 6f4a8f2e1..d3d1cf9e7 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -152,8 +152,6 @@ extern int fio_close_stream(FILE* f); extern gzFile fio_gzopen(fio_location location, const char* path, const char* mode, int level); extern int fio_gzclose(gzFile file); extern int fio_gzread(gzFile f, void *buf, unsigned size); -extern int fio_gzwrite(gzFile f, void const* buf, unsigned size); -extern int fio_gzeof(gzFile f); extern z_off_t fio_gzseek(gzFile f, z_off_t offset, int whence); extern const char* fio_gzerror(gzFile file, int *errnum); #endif From b8d6ab00dd1cf75bd408acfd6e6a21e942fab2fa Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Tue, 15 Nov 2022 10:58:53 +0300 Subject: [PATCH 078/339] [PBCKP-314] fio_mkdir has been replaced with pioMakeDir --- Makefile | 2 +- src/backup.c | 19 ++++++++++++-- src/catalog.c | 39 ++++++++++++++++++++-------- src/catchup.c | 33 ++++++++++++++++++++---- src/dir.c | 34 +++++++++++++++++++++--- src/init.c | 39 ++++++++++++++++++++++++---- src/merge.c | 8 +++++- src/pg_probackup.h | 3 +++ src/restore.c | 21 ++++++++++++--- src/utils/file.c | 64 +++++++++++++++++++++++----------------------- src/utils/file.h | 5 ++-- 11 files changed, 202 insertions(+), 65 deletions(-) diff --git a/Makefile b/Makefile index 54fd6cb10..3c8cb2c15 100644 --- a/Makefile +++ b/Makefile @@ -95,7 +95,7 @@ borrowed.mk: $(firstword $(MAKEFILE_LIST)) $(file >$@,# This file is autogenerated. Do not edit!) 
$(foreach borrowed_file, $(BORROWED_H_SRC) $(BORROWED_C_SRC), \ $(file >>$@,$(addprefix $(BORROW_DIR)/, $(notdir $(borrowed_file))): | $(CURDIR)/$(BORROW_DIR)/ $(realpath $(top_srcdir)/$(borrowed_file))) \ - $(file >>$@,$(shell echo " "'$$(LN_S) $(realpath $(top_srcdir)/$(borrowed_file)) $$@')) \ + $(file >>$@,$(shell echo " "'$$(LN_S) -f $(realpath $(top_srcdir)/$(borrowed_file)) $$@')) \ ) include borrowed.mk diff --git a/src/backup.c b/src/backup.c index 5673e700d..4845bdcff 100644 --- a/src/backup.c +++ b/src/backup.c @@ -250,9 +250,15 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, if (current.stream) { char stream_xlog_path[MAXPGPATH]; + err_i err; join_path_components(stream_xlog_path, current.database_dir, PG_XLOG_DIR); - fio_mkdir(FIO_BACKUP_HOST, stream_xlog_path, DIR_PERMISSION, false); + err = $i(pioMakeDir, current.backup_location, .path = stream_xlog_path, + .mode = DIR_PERMISSION, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } start_WAL_streaming(backup_conn, stream_xlog_path, &instance_config.conn_opt, current.start_lsn, current.tli, true); @@ -400,7 +406,16 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, join_path_components(dirpath, current.database_dir, file->rel_path); elog(LOG, "Create directory '%s'", dirpath); - fio_mkdir(FIO_BACKUP_HOST, dirpath, DIR_PERMISSION, false); + { + err_i err; + + err = $i(pioMakeDir, current.backup_location, .path = dirpath, + .mode = DIR_PERMISSION, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } + } } } diff --git a/src/catalog.c b/src/catalog.c index 923943b2c..ee781287c 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -22,7 +22,7 @@ static pgBackup* get_closest_backup(timelineInfo *tlinfo); static pgBackup* get_oldest_backup(timelineInfo *tlinfo); static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"}; static pgBackup *readBackupControlFile(const char *path); -static int 
create_backup_dir(pgBackup *backup, const char *backup_instance_path); +static err_i create_backup_dir(pgBackup *backup, const char *backup_instance_path); static bool backup_lock_exit_hook_registered = false; static parray *locks = NULL; @@ -1461,9 +1461,11 @@ pgBackupInitDir(pgBackup *backup, const char *backup_instance_path) int i; char temp[MAXPGPATH]; parray *subdirs; + err_i err; /* Try to create backup directory at first */ - if (create_backup_dir(backup, backup_instance_path) != 0) + err = create_backup_dir(backup, backup_instance_path); + if ($haserr(err)) { /* Clear backup_id as indication of error */ backup->backup_id = INVALID_BACKUP_ID; @@ -1498,8 +1500,14 @@ pgBackupInitDir(pgBackup *backup, const char *backup_instance_path) /* create directories for actual backup files */ for (i = 0; i < parray_num(subdirs); i++) { + err_i err; join_path_components(temp, backup->root_dir, parray_get(subdirs, i)); - fio_mkdir(FIO_BACKUP_HOST, temp, DIR_PERMISSION, false); + err = $i(pioMakeDir, backup->backup_location, .path = temp, + .mode = DIR_PERMISSION, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } } free_dir_list(subdirs); @@ -1512,22 +1520,24 @@ pgBackupInitDir(pgBackup *backup, const char *backup_instance_path) * 0 - ok * -1 - error (warning message already emitted) */ -int +static err_i create_backup_dir(pgBackup *backup, const char *backup_instance_path) { - int rc; char path[MAXPGPATH]; + err_i err; join_path_components(path, backup_instance_path, base36enc(backup->backup_id)); /* TODO: add wrapper for remote mode */ - rc = fio_mkdir(FIO_BACKUP_HOST, path, DIR_PERMISSION, true); - - if (rc == 0) + err = $i(pioMakeDir, backup->backup_location, .path = path, + .mode = DIR_PERMISSION, .strict = true); + if (!$haserr(err)) + { backup->root_dir = pgut_strdup(path); - else - elog(WARNING, "Cannot create directory \"%s\": %s", path, strerror(errno)); - return rc; + } else { + elog(WARNING, "%s", $errmsg(err)); + } + return err; 
} /* @@ -2969,6 +2979,9 @@ pgBackupInit(pgBackup *backup) backup->files = NULL; backup->note = NULL; backup->content_crc = 0; + + backup->backup_location = pioDriveForLocation(FIO_BACKUP_HOST); + backup->database_location = pioDriveForLocation(FIO_DB_HOST); } /* free pgBackup object */ @@ -2977,6 +2990,10 @@ pgBackupFree(void *backup) { pgBackup *b = (pgBackup *) backup; + /* Both point to global static vars */ + b->backup_location.self = NULL; + b->database_location.self = NULL; + pg_free(b->primary_conninfo); pg_free(b->external_dir_str); pg_free(b->root_dir); diff --git a/src/catchup.c b/src/catchup.c index 8034fba0a..7f82658ed 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -613,6 +613,7 @@ int do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files, parray *exclude_absolute_paths_list, parray *exclude_relative_paths_list) { + pioDrive_i local_location = pioDriveForLocation(FIO_LOCAL_HOST); PGconn *source_conn = NULL; PGNodeInfo source_node_info; bool backup_logs = false; @@ -704,7 +705,14 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR); if (!dry_run) { - fio_mkdir(FIO_LOCAL_HOST, dest_xlog_path, DIR_PERMISSION, false); + err_i err; + + err = $i(pioMakeDir, local_location, .path = dest_xlog_path, + .mode = DIR_PERMISSION, .strict = false); + if($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, current.start_lsn, current.tli, false); } @@ -820,7 +828,16 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, elog(LOG, "Create directory '%s'", dirpath); if (!dry_run) - fio_mkdir(FIO_LOCAL_HOST, dirpath, DIR_PERMISSION, false); + { + err_i err; + + err = $i(pioMakeDir, local_location, .path = dirpath, + .mode = DIR_PERMISSION, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } + } 
} else { @@ -853,10 +870,16 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (!dry_run) { + err_i err; + /* create tablespace directory */ - if (fio_mkdir(FIO_LOCAL_HOST, linked_path, file->mode, false) != 0) - elog(ERROR, "Could not create tablespace directory \"%s\": %s", - linked_path, strerror(errno)); + err = $i(pioMakeDir, local_location, .path = linked_path, + .mode = file->mode, .strict = false); + if ($haserr(err)) + { + elog(ERROR, "Could not create tablespace directory \"%s\": \"%s\"", + linked_path, $errmsg(err)); + } /* create link to linked_path */ if (fio_symlink(FIO_LOCAL_HOST, linked_path, to_path, true) < 0) diff --git a/src/dir.c b/src/dir.c index 237566c1a..6194ea8bc 100644 --- a/src/dir.c +++ b/src/dir.c @@ -847,6 +847,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba bool extract_tablespaces, bool incremental, fio_location location, const char* waldir_path) { + pioDrive_i drive = pioDriveForLocation(location); int i; parray *links = NULL; mode_t pg_tablespace_mode = DIR_PERMISSION; @@ -932,7 +933,16 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba waldir_path, to_path); /* create tablespace directory from waldir_path*/ - fio_mkdir(location, waldir_path, pg_tablespace_mode, false); + { + err_i err; + + err = $i(pioMakeDir, drive, .path = waldir_path, + .mode = pg_tablespace_mode, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } + } /* create link to linked_path */ if (fio_symlink(location, waldir_path, to_path, incremental) < 0) @@ -974,7 +984,16 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba linked_path, to_path); /* create tablespace directory */ - fio_mkdir(location, linked_path, pg_tablespace_mode, false); + { + err_i err; + + err = $i(pioMakeDir, drive, .path = linked_path, + .mode = pg_tablespace_mode, .strict = false); + if ($haserr(err)) + { + elog(WARNING, 
"%s", $errmsg(err)); + } + } /* create link to linked_path */ if (fio_symlink(location, linked_path, to_path, incremental) < 0) @@ -992,7 +1011,16 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba join_path_components(to_path, data_dir, dir->rel_path); // TODO check exit code - fio_mkdir(location, to_path, dir->mode, false); + { + err_i err; + + err = $i(pioMakeDir, drive, .path = to_path, .mode = dir->mode, + .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } + } } if (extract_tablespaces) diff --git a/src/init.c b/src/init.c index 511256aa3..ce7ad6ee2 100644 --- a/src/init.c +++ b/src/init.c @@ -18,7 +18,9 @@ int do_init(CatalogState *catalogState) { + pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); int results; + err_i err; results = pg_check_dir(catalogState->catalog_path); @@ -32,13 +34,28 @@ do_init(CatalogState *catalogState) } /* create backup catalog root directory */ - fio_mkdir(FIO_BACKUP_HOST, catalogState->catalog_path, DIR_PERMISSION, false); + err = $i(pioMakeDir, backup_location, .path = catalogState->catalog_path, + .mode = DIR_PERMISSION, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } /* create backup catalog data directory */ - fio_mkdir(FIO_BACKUP_HOST, catalogState->backup_subdir_path, DIR_PERMISSION, false); + err = $i(pioMakeDir, backup_location, .path = catalogState->backup_subdir_path, + .mode = DIR_PERMISSION, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } /* create backup catalog wal directory */ - fio_mkdir(FIO_BACKUP_HOST, catalogState->wal_subdir_path, DIR_PERMISSION, false); + err = $i(pioMakeDir, backup_location, .path = catalogState->wal_subdir_path, + .mode = DIR_PERMISSION, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } elog(INFO, "Backup catalog '%s' successfully inited", catalogState->catalog_path); return 0; @@ -47,8 +64,10 @@ 
do_init(CatalogState *catalogState) int do_add_instance(InstanceState *instanceState, InstanceConfig *instance) { + pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); struct stat st; CatalogState *catalogState = instanceState->catalog_state; + err_i err; /* PGDATA is always required */ if (instance->pgdata == NULL) @@ -85,8 +104,18 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) instanceState->instance_name, instanceState->instance_wal_subdir_path); /* Create directory for data files of this specific instance */ - fio_mkdir(FIO_BACKUP_HOST, instanceState->instance_backup_subdir_path, DIR_PERMISSION, false); - fio_mkdir(FIO_BACKUP_HOST, instanceState->instance_wal_subdir_path, DIR_PERMISSION, false); + err = $i(pioMakeDir, backup_location, .path = instanceState->instance_backup_subdir_path, + .mode = DIR_PERMISSION, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } + err = $i(pioMakeDir, backup_location, .path = instanceState->instance_wal_subdir_path, + .mode = DIR_PERMISSION, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } /* * Write initial configuration file. 
diff --git a/src/merge.c b/src/merge.c index ded57c926..61e3aa772 100644 --- a/src/merge.c +++ b/src/merge.c @@ -641,11 +641,17 @@ merge_chain(InstanceState *instanceState, { char dirpath[MAXPGPATH]; char new_container[MAXPGPATH]; + err_i err; makeExternalDirPathByNum(new_container, full_external_prefix, file->external_dir_num); join_path_components(dirpath, new_container, file->rel_path); - fio_mkdir(FIO_BACKUP_HOST, dirpath, DIR_PERMISSION, false); + err = $i(pioMakeDir, dest_backup->backup_location, .path = dirpath, + .mode = DIR_PERMISSION, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } } pg_atomic_init_flag(&file->lock); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 08fcaa09c..41f888d18 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -488,6 +488,9 @@ struct pgBackup /* map used for access to page headers */ HeaderMap hdr_map; + + pioDrive_i database_location; /* Where to backup from/restore to */ + pioDrive_i backup_location; /* Where to save to/read from */ }; /* Recovery target for restore and validate subcommands */ diff --git a/src/restore.c b/src/restore.c index ebd9bae22..7fc15f34d 100644 --- a/src/restore.c +++ b/src/restore.c @@ -816,8 +816,17 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, elog(LOG, "Restore external directories"); for (i = 0; i < parray_num(external_dirs); i++) - fio_mkdir(FIO_DB_HOST, parray_get(external_dirs, i), - DIR_PERMISSION, false); + { + char *dirpath = parray_get(external_dirs, i); + err_i err; + + err = $i(pioMakeDir, dest_backup->database_location, + .path = dirpath, .mode = DIR_PERMISSION, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } + } } /* @@ -835,6 +844,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, { char *external_path; char dirpath[MAXPGPATH]; + err_i err; if (parray_num(external_dirs) < file->external_dir_num - 1) elog(ERROR, "Inconsistent external directory backup metadata"); @@ 
-843,7 +853,12 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, join_path_components(dirpath, external_path, file->rel_path); elog(LOG, "Create external directory \"%s\"", dirpath); - fio_mkdir(FIO_DB_HOST, dirpath, file->mode, false); + err = $i(pioMakeDir, dest_backup->database_location, .path = dirpath, + .mode = file->mode, .strict = false); + if ($haserr(err)) + { + elog(WARNING, "%s", $errmsg(err)); + } } } diff --git a/src/utils/file.c b/src/utils/file.c index 4b5601968..33e154517 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1531,39 +1531,8 @@ dir_create_dir(const char *dir, mode_t mode, bool strict) } /* - * Create directory + * Executed by remote agent. */ -int -fio_mkdir(fio_location location, const char* path, int mode, bool strict) -{ - if (fio_is_remote(location)) - { - fio_header hdr = { - .cop = FIO_MKDIR, - .handle = strict ? 1 : 0, /* ugly "hack" to pass more params*/ - .size = strlen(path) + 1, - .arg = mode, - }; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); - - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - Assert(hdr.cop == FIO_MKDIR); - - if (hdr.arg != 0) - { - errno = hdr.arg; - return -1; - } - return 0; - } - else - { - return dir_create_dir(path, mode, strict); - } -} - static void fio_mkdir_impl(const char* path, int mode, bool strict, int out) { @@ -4136,6 +4105,14 @@ pioLocalDrive_pioIsRemote(VSelf) return false; } +static err_i +pioLocalDrive_pioMakeDir(VSelf, path_t path, mode_t mode, bool strict) +{ + int rc = dir_create_dir(path, mode, strict); + if (rc == 0) return $noerr(); + return $syserr(errno, "Cannot make dir {path:q}", path(path)); +} + static void pioLocalDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_tablespaces, bool follow_symlink, bool backup_logs, bool skip_hidden, @@ -4452,6 +4429,29 @@ pioRemoteDrive_pioIsRemote(VSelf) return true; } +static err_i 
+pioRemoteDrive_pioMakeDir(VSelf, path_t path, mode_t mode, bool strict) +{ + fio_header hdr = { + .cop = FIO_MKDIR, + .handle = strict ? 1 : 0, /* ugly "hack" to pass more params*/ + .size = strlen(path) + 1, + .arg = mode, + }; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); + + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + Assert(hdr.cop == FIO_MKDIR); + + if (hdr.arg == 0) + { + return $noerr(); + } + return $syserr(hdr.arg, "Cannot make dir {path:q}", path(path)); +} + static void pioRemoteDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_tablespaces, bool follow_symlink, bool backup_logs, bool skip_hidden, diff --git a/src/utils/file.h b/src/utils/file.h index 6f4a8f2e1..e518b17a8 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -175,7 +175,6 @@ fio_get_crc32_truncated(fio_location location, const char *file_path, extern int fio_rename(fio_location location, const char* old_path, const char* new_path); extern int fio_symlink(fio_location location, const char* target, const char* link_path, bool overwrite); extern int fio_remove(fio_location location, const char* path, bool missing_ok); -extern int fio_mkdir(fio_location location, const char* path, int mode, bool strict); extern int fio_chmod(fio_location location, const char* path, int mode); extern int fio_access(fio_location location, const char* path, int mode); extern int fio_stat(fio_location location, const char* path, struct stat* st, bool follow_symlinks); @@ -254,6 +253,7 @@ typedef struct stat stat_t; #define mth__pioGetCRC32 pg_crc32, (path_t, path), (bool, compressed), \ (err_i *, err) #define mth__pioIsRemote bool +#define mth__pioMakeDir err_i, (path_t, path), (mode_t, mode), (bool, strict) #define mth__pioListDir void, (parray *, files), (const char *, root), \ (bool, handle_tablespaces), (bool, symlink_and_hidden), \ (bool, backup_logs), (bool, 
skip_hidden), (int, external_dir_num) @@ -266,12 +266,13 @@ fobj_method(pioRename); fobj_method(pioExists); fobj_method(pioIsRemote); fobj_method(pioGetCRC32); +fobj_method(pioMakeDir); fobj_method(pioListDir); fobj_method(pioRemoveDir); #define iface__pioDrive mth(pioOpen, pioStat, pioRemove, pioRename), \ mth(pioExists, pioGetCRC32, pioIsRemote), \ - mth(pioListDir, pioRemoveDir) + mth(pioMakeDir, pioListDir, pioRemoveDir) fobj_iface(pioDrive); extern pioDrive_i pioDriveForLocation(fio_location location); From 1eaf9d9008ff7f960b97c4c15a51a9d9d1f9fda6 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Mon, 14 Nov 2022 12:44:03 +0500 Subject: [PATCH 079/339] Fix S3_write_config parameters --- src/pg_probackup.c | 2 +- src/pg_probackup.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 14c92f95e..77d6fe2b1 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -997,7 +997,7 @@ main(int argc, char *argv[]) err = do_add_instance(instanceState, &instance_config); #ifdef PBCKP_S3 if (err == 0 && s3_protocol != S3_INVALID_PROTOCOL) - err = do_S3_write_config(&instance_config); + err = do_S3_write_config(instanceState); #endif return err; } diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 0c538b8b3..c73874c79 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -825,7 +825,7 @@ extern int do_S3_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params,time_t start_time); extern int do_S3_show(InstanceState *instanceState); extern int do_S3_restore(InstanceState *instanceState, time_t target_backup_id); -extern int do_S3_write_config(InstanceConfig *instance); +extern int do_S3_write_config(InstanceState *instanceState); #endif /* in catchup.c */ From 94370e4b5cf76776d34d6a91e7d7add26fca887f Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Tue, 15 Nov 2022 13:53:56 +0300 Subject: [PATCH 080/339] [PBCKP-314] move err_i to the beginning of a function. 
--- src/backup.c | 15 ++++++--------- src/catalog.c | 3 +-- src/catchup.c | 8 ++------ src/dir.c | 38 ++++++++++++++------------------------ src/merge.c | 3 ++- src/restore.c | 3 +-- 6 files changed, 26 insertions(+), 44 deletions(-) diff --git a/src/backup.c b/src/backup.c index 4845bdcff..3a2a60c27 100644 --- a/src/backup.c +++ b/src/backup.c @@ -111,6 +111,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, time_t start_time, end_time; char pretty_time[20]; char pretty_bytes[20]; + err_i err = $noerr(); + elog(INFO, "Database backup start"); if(current.external_dir_str) @@ -250,7 +252,6 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, if (current.stream) { char stream_xlog_path[MAXPGPATH]; - err_i err; join_path_components(stream_xlog_path, current.database_dir, PG_XLOG_DIR); err = $i(pioMakeDir, current.backup_location, .path = stream_xlog_path, @@ -406,15 +407,11 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, join_path_components(dirpath, current.database_dir, file->rel_path); elog(LOG, "Create directory '%s'", dirpath); + err = $i(pioMakeDir, current.backup_location, .path = dirpath, + .mode = DIR_PERMISSION, .strict = false); + if ($haserr(err)) { - err_i err; - - err = $i(pioMakeDir, current.backup_location, .path = dirpath, - .mode = DIR_PERMISSION, .strict = false); - if ($haserr(err)) - { - elog(WARNING, "%s", $errmsg(err)); - } + elog(WARNING, "%s", $errmsg(err)); } } diff --git a/src/catalog.c b/src/catalog.c index ee781287c..e528c7364 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1461,7 +1461,7 @@ pgBackupInitDir(pgBackup *backup, const char *backup_instance_path) int i; char temp[MAXPGPATH]; parray *subdirs; - err_i err; + err_i err = $noerr(); /* Try to create backup directory at first */ err = create_backup_dir(backup, backup_instance_path); @@ -1500,7 +1500,6 @@ pgBackupInitDir(pgBackup *backup, const char *backup_instance_path) /* create directories for actual backup files */ for (i = 0; 
i < parray_num(subdirs); i++) { - err_i err; join_path_components(temp, backup->root_dir, parray_get(subdirs, i)); err = $i(pioMakeDir, backup->backup_location, .path = temp, .mode = DIR_PERMISSION, .strict = false); diff --git a/src/catchup.c b/src/catchup.c index 7f82658ed..73b6be8d3 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -633,6 +633,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, ssize_t transfered_datafiles_bytes = 0; ssize_t transfered_walfiles_bytes = 0; char pretty_source_bytes[20]; + err_i err = $noerr(); + source_conn = catchup_init_state(&source_node_info, source_pgdata, dest_pgdata); catchup_preflight_checks(&source_node_info, source_conn, source_pgdata, dest_pgdata); @@ -705,8 +707,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR); if (!dry_run) { - err_i err; - err = $i(pioMakeDir, local_location, .path = dest_xlog_path, .mode = DIR_PERMISSION, .strict = false); if($haserr(err)) @@ -829,8 +829,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, elog(LOG, "Create directory '%s'", dirpath); if (!dry_run) { - err_i err; - err = $i(pioMakeDir, local_location, .path = dirpath, .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) @@ -870,8 +868,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (!dry_run) { - err_i err; - /* create tablespace directory */ err = $i(pioMakeDir, local_location, .path = linked_path, .mode = file->mode, .strict = false); diff --git a/src/dir.c b/src/dir.c index 6194ea8bc..570e03088 100644 --- a/src/dir.c +++ b/src/dir.c @@ -852,6 +852,8 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba parray *links = NULL; mode_t pg_tablespace_mode = DIR_PERMISSION; char to_path[MAXPGPATH]; + err_i err = $noerr(); + if (waldir_path && !dir_is_empty(waldir_path, location)) { @@ -933,15 +935,11 
@@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba waldir_path, to_path); /* create tablespace directory from waldir_path*/ + err = $i(pioMakeDir, drive, .path = waldir_path, + .mode = pg_tablespace_mode, .strict = false); + if ($haserr(err)) { - err_i err; - - err = $i(pioMakeDir, drive, .path = waldir_path, - .mode = pg_tablespace_mode, .strict = false); - if ($haserr(err)) - { - elog(WARNING, "%s", $errmsg(err)); - } + elog(WARNING, "%s", $errmsg(err)); } /* create link to linked_path */ @@ -984,15 +982,11 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba linked_path, to_path); /* create tablespace directory */ + err = $i(pioMakeDir, drive, .path = linked_path, + .mode = pg_tablespace_mode, .strict = false); + if ($haserr(err)) { - err_i err; - - err = $i(pioMakeDir, drive, .path = linked_path, - .mode = pg_tablespace_mode, .strict = false); - if ($haserr(err)) - { - elog(WARNING, "%s", $errmsg(err)); - } + elog(WARNING, "%s", $errmsg(err)); } /* create link to linked_path */ @@ -1011,15 +1005,11 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba join_path_components(to_path, data_dir, dir->rel_path); // TODO check exit code + err = $i(pioMakeDir, drive, .path = to_path, .mode = dir->mode, + .strict = false); + if ($haserr(err)) { - err_i err; - - err = $i(pioMakeDir, drive, .path = to_path, .mode = dir->mode, - .strict = false); - if ($haserr(err)) - { - elog(WARNING, "%s", $errmsg(err)); - } + elog(WARNING, "%s", $errmsg(err)); } } diff --git a/src/merge.c b/src/merge.c index 61e3aa772..7bc6e6cd1 100644 --- a/src/merge.c +++ b/src/merge.c @@ -461,6 +461,8 @@ merge_chain(InstanceState *instanceState, /* in-place merge flags */ bool compression_match = false; bool program_version_match = false; + err_i err = $noerr(); + /* It's redundant to check block checksumms during merge */ skip_block_validation = true; @@ -641,7 +643,6 @@ merge_chain(InstanceState 
*instanceState, { char dirpath[MAXPGPATH]; char new_container[MAXPGPATH]; - err_i err; makeExternalDirPathByNum(new_container, full_external_prefix, file->external_dir_num); diff --git a/src/restore.c b/src/restore.c index 7fc15f34d..854ae4c53 100644 --- a/src/restore.c +++ b/src/restore.c @@ -720,6 +720,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, size_t total_bytes = 0; char pretty_time[20]; time_t start_time, end_time; + err_i err = $noerr(); /* Preparations for actual restoring */ time2iso(timestamp, lengthof(timestamp), dest_backup->start_time, false); @@ -818,7 +819,6 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, for (i = 0; i < parray_num(external_dirs); i++) { char *dirpath = parray_get(external_dirs, i); - err_i err; err = $i(pioMakeDir, dest_backup->database_location, .path = dirpath, .mode = DIR_PERMISSION, .strict = false); @@ -844,7 +844,6 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, { char *external_path; char dirpath[MAXPGPATH]; - err_i err; if (parray_num(external_dirs) < file->external_dir_num - 1) elog(ERROR, "Inconsistent external directory backup metadata"); From 19862d8b5e091af701ff6323a1f8ed13238ad570 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Tue, 15 Nov 2022 14:46:57 +0300 Subject: [PATCH 081/339] [PBCKP-314] Call FOBJ_FUNC_ARP() to initialize top-level autorelease pool. --- src/pg_probackup.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 3b008deb0..eaeabd2bc 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -294,6 +294,7 @@ main(int argc, char *argv[]) ft_init_log(elog_ft_log); fobj_init(); + FOBJ_FUNC_ARP(); init_pio_objects(); PROGRAM_NAME_FULL = argv[0]; From d6815e0e9e00c4b46a847dfbd02acee8b7fde860 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Tue, 15 Nov 2022 16:37:01 +0300 Subject: [PATCH 082/339] [PBCKP-314] Failed directory creation is an error. 
--- src/backup.c | 5 +++-- src/catalog.c | 5 +++-- src/catchup.c | 4 ++-- src/dir.c | 10 ++++++---- src/init.c | 14 +++++++++----- src/merge.c | 3 ++- src/restore.c | 6 ++++-- 7 files changed, 29 insertions(+), 18 deletions(-) diff --git a/src/backup.c b/src/backup.c index 3a2a60c27..c285218fb 100644 --- a/src/backup.c +++ b/src/backup.c @@ -258,7 +258,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create WAL directory: %s", $errmsg(err)); } start_WAL_streaming(backup_conn, stream_xlog_path, &instance_config.conn_opt, @@ -411,7 +411,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create instance backup directory: %s", + $errmsg(err)); } } diff --git a/src/catalog.c b/src/catalog.c index e528c7364..a41e861cf 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1505,7 +1505,7 @@ pgBackupInitDir(pgBackup *backup, const char *backup_instance_path) .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create backup directory: %s", $errmsg(err)); } } @@ -1534,8 +1534,9 @@ create_backup_dir(pgBackup *backup, const char *backup_instance_path) { backup->root_dir = pgut_strdup(path); } else { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create backup directory: %s", $errmsg(err)); } + return err; } diff --git a/src/catchup.c b/src/catchup.c index 73b6be8d3..7cdcc82cd 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -711,7 +711,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, .mode = DIR_PERMISSION, .strict = false); if($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create WAL directory: %s", $errmsg(err)); } start_WAL_streaming(source_conn, 
dest_xlog_path, &instance_config.conn_opt, current.start_lsn, current.tli, false); @@ -833,7 +833,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create directory: %s", $errmsg(err)); } } } diff --git a/src/dir.c b/src/dir.c index 570e03088..6bb668396 100644 --- a/src/dir.c +++ b/src/dir.c @@ -939,7 +939,8 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba .mode = pg_tablespace_mode, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create tablespace directory: %s", + $errmsg(err)); } /* create link to linked_path */ @@ -986,7 +987,8 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba .mode = pg_tablespace_mode, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create tablespace directory: %s", + $errmsg(err)); } /* create link to linked_path */ @@ -1004,12 +1006,12 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba join_path_components(to_path, data_dir, dir->rel_path); - // TODO check exit code err = $i(pioMakeDir, drive, .path = to_path, .mode = dir->mode, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create tablespace directory: %s", + $errmsg(err)); } } diff --git a/src/init.c b/src/init.c index ce7ad6ee2..b25676a3e 100644 --- a/src/init.c +++ b/src/init.c @@ -38,7 +38,8 @@ do_init(CatalogState *catalogState) .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create backup catalog root directory: %s", + $errmsg(err)); } /* create backup catalog data directory */ @@ -46,7 +47,8 @@ do_init(CatalogState *catalogState) .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { - elog(WARNING, 
"%s", $errmsg(err)); + elog(ERROR, "Can not create backup catalog data directory: %s", + $errmsg(err)); } /* create backup catalog wal directory */ @@ -54,7 +56,8 @@ do_init(CatalogState *catalogState) .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create backup catalog WAL directory: %s", + $errmsg(err)); } elog(INFO, "Backup catalog '%s' successfully inited", catalogState->catalog_path); @@ -108,13 +111,14 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create instance backup directory: %s", + $errmsg(err)); } err = $i(pioMakeDir, backup_location, .path = instanceState->instance_wal_subdir_path, .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create instance WAL directory: %s", $errmsg(err)); } /* diff --git a/src/merge.c b/src/merge.c index 7bc6e6cd1..a03cd2209 100644 --- a/src/merge.c +++ b/src/merge.c @@ -651,7 +651,8 @@ merge_chain(InstanceState *instanceState, .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create backup external directory: %s", + $errmsg(err)); } } diff --git a/src/restore.c b/src/restore.c index 854ae4c53..6e8b4e10e 100644 --- a/src/restore.c +++ b/src/restore.c @@ -824,7 +824,8 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, .path = dirpath, .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not restore external directory: %s", + $errmsg(err)); } } } @@ -856,7 +857,8 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, .mode = file->mode, .strict = false); if ($haserr(err)) { - elog(WARNING, "%s", $errmsg(err)); + elog(ERROR, "Can not create backup external directory: %s", + 
$errmsg(err)); } } } From 8d7b851be65d7bcebc147d91ce05743ec7b0be76 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Tue, 15 Nov 2022 20:21:35 +0300 Subject: [PATCH 083/339] [PBCKP-314] Add unit tests for catalog initialization. --- tests/helpers/ptrack_helpers.py | 5 +- tests/init.py | 90 +++++++++++++++++++++++++++++++-- 2 files changed, 90 insertions(+), 5 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 772bbcb1e..fd902e28e 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -869,9 +869,10 @@ def run_binary(self, command, asynchronous=False, env=None): except subprocess.CalledProcessError as e: raise ProbackupException(e.output.decode('utf-8'), command) - def init_pb(self, backup_dir, options=[], old_binary=False): + def init_pb(self, backup_dir, options=[], old_binary=False, cleanup=True): - shutil.rmtree(backup_dir, ignore_errors=True) + if cleanup: + shutil.rmtree(backup_dir, ignore_errors=True) # don`t forget to kill old_binary after remote ssh release if self.remote and not old_binary: diff --git a/tests/init.py b/tests/init.py index f5715d249..ecf4d604d 100644 --- a/tests/init.py +++ b/tests/init.py @@ -1,14 +1,18 @@ import os +import stat import unittest -from .helpers.ptrack_helpers import dir_files, ProbackupTest, ProbackupException import shutil +from .helpers.ptrack_helpers import dir_files, ProbackupTest, ProbackupException + module_name = 'init' +DIR_PERMISSION = 0o700 -class InitTest(ProbackupTest, unittest.TestCase): +CATALOG_DIRS = ['backups', 'wal'] +class InitTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure def test_success(self): @@ -19,8 +23,13 @@ def test_success(self): self.init_pb(backup_dir) self.assertEqual( dir_files(backup_dir), - ['backups', 'wal'] + CATALOG_DIRS ) + + for subdir in CATALOG_DIRS: + dirname = os.path.join(backup_dir, subdir) + self.assertEqual(DIR_PERMISSION, 
stat.S_IMODE(os.stat(dirname).st_mode)) + self.add_instance(backup_dir, 'node', node) self.assertIn( "INFO: Instance 'node' successfully deleted", @@ -155,3 +164,78 @@ def test_add_instance_idempotence(self): # Clean after yourself self.del_test_dir(module_name, fname) + + def test_init_backup_catalog_no_access(self): + """ Test pg_probackup init -B backup_dir to a dir with no read access. """ + fname = self.id().split('.')[3] + + no_access_dir = os.path.join(self.tmp_path, module_name, fname, + 'noaccess') + backup_dir = os.path.join(no_access_dir, 'backup') + os.makedirs(no_access_dir) + os.chmod(no_access_dir, stat.S_IREAD) + + try: + self.init_pb(backup_dir, cleanup=False) + except ProbackupException as e: + self.assertEqual(f'ERROR: cannot open backup catalog directory "{backup_dir}": Permission denied\n', + e.message) + finally: + self.del_test_dir(module_name, fname) + + def test_init_backup_catalog_no_write(self): + """ Test pg_probackup init -B backup_dir to a dir with no write access. """ + fname = self.id().split('.')[3] + + no_access_dir = os.path.join(self.tmp_path, module_name, fname, + 'noaccess') + backup_dir = os.path.join(no_access_dir, 'backup') + os.makedirs(no_access_dir) + os.chmod(no_access_dir, stat.S_IREAD|stat.S_IEXEC) + + try: + self.init_pb(backup_dir, cleanup=False) + except ProbackupException as e: + self.assertEqual(f'ERROR: Can not create backup catalog root directory: Cannot make dir "{backup_dir}": Permission denied\n', + e.message) + finally: + self.del_test_dir(module_name, fname) + + def test_init_backup_catalog_no_create(self): + """ Test pg_probackup init -B backup_dir to a dir when backup dir exists but not writeable. 
""" + fname = self.id().split('.')[3] + + parent_dir = os.path.join(self.tmp_path, module_name, fname, + 'parent') + backup_dir = os.path.join(parent_dir, 'backup') + os.makedirs(backup_dir) + os.chmod(backup_dir, stat.S_IREAD|stat.S_IEXEC) + + try: + self.init_pb(backup_dir, cleanup=False) + except ProbackupException as e: + backups_dir = os.path.join(backup_dir, 'backups') + self.assertEqual(f'ERROR: Can not create backup catalog data directory: Cannot make dir "{backups_dir}": Permission denied\n', + e.message) + finally: + self.del_test_dir(module_name, fname) + + def test_init_backup_catalog_exists_not_empty(self): + """ Test pg_probackup init -B backup_dir which exists and not empty. """ + fname = self.id().split('.')[3] + + parent_dir = os.path.join(self.tmp_path, module_name, fname, + 'parent') + backup_dir = os.path.join(parent_dir, 'backup') + os.makedirs(backup_dir) + with open(os.path.join(backup_dir, 'somefile.txt'), 'w') as fout: + fout.write("42\n") + + try: + self.init_pb(backup_dir, cleanup=False) + self.fail("This should have failed due to non empty catalog dir.") + except ProbackupException as e: + self.assertEqual("ERROR: backup catalog already exist and it's not empty\n", + e.message) + finally: + self.del_test_dir(module_name, fname) From cdb56ab27e8bfccaae72c9fe973b14fc7cb7a38b Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Wed, 16 Nov 2022 00:35:31 +0300 Subject: [PATCH 084/339] [PBCKP-314] Add windows permissions mask. Use self.assertRaisesRegex. 
--- tests/init.py | 52 +++++++++++++++++++++++---------------------------- 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/tests/init.py b/tests/init.py index ecf4d604d..161210e32 100644 --- a/tests/init.py +++ b/tests/init.py @@ -8,7 +8,7 @@ module_name = 'init' -DIR_PERMISSION = 0o700 +DIR_PERMISSION = 0o700 if os.name != 'nt' else 0o777 CATALOG_DIRS = ['backups', 'wal'] @@ -175,13 +175,12 @@ def test_init_backup_catalog_no_access(self): os.makedirs(no_access_dir) os.chmod(no_access_dir, stat.S_IREAD) - try: + expected = 'ERROR: cannot open backup catalog directory "{0}": Permission denied'.format(backup_dir) + with self.assertRaisesRegex(ProbackupException, expected): self.init_pb(backup_dir, cleanup=False) - except ProbackupException as e: - self.assertEqual(f'ERROR: cannot open backup catalog directory "{backup_dir}": Permission denied\n', - e.message) - finally: - self.del_test_dir(module_name, fname) + + # Clean after yourself + self.del_test_dir(module_name, fname) def test_init_backup_catalog_no_write(self): """ Test pg_probackup init -B backup_dir to a dir with no write access. """ @@ -193,13 +192,12 @@ def test_init_backup_catalog_no_write(self): os.makedirs(no_access_dir) os.chmod(no_access_dir, stat.S_IREAD|stat.S_IEXEC) - try: + expected = 'ERROR: Can not create backup catalog root directory: Cannot make dir "{0}": Permission denied'.format(backup_dir) + with self.assertRaisesRegex(ProbackupException, expected): self.init_pb(backup_dir, cleanup=False) - except ProbackupException as e: - self.assertEqual(f'ERROR: Can not create backup catalog root directory: Cannot make dir "{backup_dir}": Permission denied\n', - e.message) - finally: - self.del_test_dir(module_name, fname) + + # Clean after yourself + self.del_test_dir(module_name, fname) def test_init_backup_catalog_no_create(self): """ Test pg_probackup init -B backup_dir to a dir when backup dir exists but not writeable. 
""" @@ -211,14 +209,13 @@ def test_init_backup_catalog_no_create(self): os.makedirs(backup_dir) os.chmod(backup_dir, stat.S_IREAD|stat.S_IEXEC) - try: + backups_dir = os.path.join(backup_dir, 'backups') + expected = 'ERROR: Can not create backup catalog data directory: Cannot make dir "{0}": Permission denied'.format(backups_dir) + with self.assertRaisesRegex(ProbackupException, expected): self.init_pb(backup_dir, cleanup=False) - except ProbackupException as e: - backups_dir = os.path.join(backup_dir, 'backups') - self.assertEqual(f'ERROR: Can not create backup catalog data directory: Cannot make dir "{backups_dir}": Permission denied\n', - e.message) - finally: - self.del_test_dir(module_name, fname) + + # Clean after yourself + self.del_test_dir(module_name, fname) def test_init_backup_catalog_exists_not_empty(self): """ Test pg_probackup init -B backup_dir which exists and not empty. """ @@ -228,14 +225,11 @@ def test_init_backup_catalog_exists_not_empty(self): 'parent') backup_dir = os.path.join(parent_dir, 'backup') os.makedirs(backup_dir) - with open(os.path.join(backup_dir, 'somefile.txt'), 'w') as fout: - fout.write("42\n") + with open(os.path.join(backup_dir, 'somefile.txt'), 'wb'): + pass - try: + with self.assertRaisesRegex(ProbackupException, "ERROR: backup catalog already exist and it's not empty"): self.init_pb(backup_dir, cleanup=False) - self.fail("This should have failed due to non empty catalog dir.") - except ProbackupException as e: - self.assertEqual("ERROR: backup catalog already exist and it's not empty\n", - e.message) - finally: - self.del_test_dir(module_name, fname) + + # Clean after yourself + self.del_test_dir(module_name, fname) From e88da4f1e3c8c4873cde95173579de5147fe6ec6 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Wed, 16 Nov 2022 18:48:47 +0300 Subject: [PATCH 085/339] [PBCKP-314] Do cleanup backup dir in test_init_backup_catalog_no_access and test_init_backup_catalog_no_write. 
--- tests/init.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/init.py b/tests/init.py index 161210e32..7fb352723 100644 --- a/tests/init.py +++ b/tests/init.py @@ -177,7 +177,7 @@ def test_init_backup_catalog_no_access(self): expected = 'ERROR: cannot open backup catalog directory "{0}": Permission denied'.format(backup_dir) with self.assertRaisesRegex(ProbackupException, expected): - self.init_pb(backup_dir, cleanup=False) + self.init_pb(backup_dir) # Clean after yourself self.del_test_dir(module_name, fname) @@ -194,7 +194,7 @@ def test_init_backup_catalog_no_write(self): expected = 'ERROR: Can not create backup catalog root directory: Cannot make dir "{0}": Permission denied'.format(backup_dir) with self.assertRaisesRegex(ProbackupException, expected): - self.init_pb(backup_dir, cleanup=False) + self.init_pb(backup_dir) # Clean after yourself self.del_test_dir(module_name, fname) From e9055001aceaf564088c600ec31581374c0f6f02 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 16 Nov 2022 03:45:32 +0300 Subject: [PATCH 086/339] [PBCKP-345] rework file stat handling to simplify it and improve cross-platform portability - don't use the full `struct stat`, since it differs between platforms - store the file kind separately from the mode, in a pb-specific way - add pioFilesAreSame as a replacement for fio_is_same_file; since it needs st_ino and st_dev, it always has to run on the local drive. 
--- src/archive.c | 6 +- src/backup.c | 22 ++-- src/catalog.c | 32 ++++-- src/catchup.c | 12 +-- src/checkdb.c | 4 +- src/dir.c | 58 +++++----- src/fetch.c | 18 ++-- src/merge.c | 5 +- src/pg_probackup.c | 20 ++-- src/pg_probackup.h | 18 ++-- src/restore.c | 26 ++--- src/utils/file.c | 259 +++++++++++++++++++++++++++++++++------------ src/utils/file.h | 39 +++++-- src/validate.c | 13 ++- 14 files changed, 356 insertions(+), 176 deletions(-) diff --git a/src/archive.c b/src/archive.c index bcc0bccbc..abc04e86e 100644 --- a/src/archive.c +++ b/src/archive.c @@ -388,7 +388,7 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, char to_fullpath[MAXPGPATH]; char to_fullpath_part[MAXPGPATH]; /* partial handling */ - struct stat st; + pio_stat_t st; int partial_try_count = 0; ssize_t partial_file_size = 0; bool partial_is_stale = true; @@ -466,11 +466,11 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, elog(LOG, "Temp WAL file already exists, waiting on it %u seconds: \"%s\"", archive_timeout, to_fullpath_part); - partial_file_size = st.st_size; + partial_file_size = st.pst_size; } /* file size is changing */ - if (st.st_size != partial_file_size) + if (st.pst_size != partial_file_size) partial_is_stale = false; sleep(1); diff --git a/src/backup.c b/src/backup.c index c285218fb..6b1ee6bd3 100644 --- a/src/backup.c +++ b/src/backup.c @@ -392,7 +392,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, pgFile *file = (pgFile *) parray_get(backup_files_list, i); /* if the entry was a directory, create it in the backup */ - if (S_ISDIR(file->mode)) + if (file->kind == PIO_KIND_DIRECTORY) { char dirpath[MAXPGPATH]; @@ -569,7 +569,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, pgFile *file = (pgFile *) parray_get(backup_files_list, i); /* TODO: sync directory ? 
*/ - if (S_ISDIR(file->mode)) + if (file->kind == PIO_KIND_DIRECTORY) continue; if (file->write_size <= 0) @@ -1837,7 +1837,7 @@ pg_stop_backup_write_file_helper(const char *path, const char *filename, const c file = pgFileNew(full_filename, filename, true, 0, FIO_BACKUP_HOST); - if (S_ISREG(file->mode)) + if (file->kind == PIO_KIND_REGULAR) { file->crc = pgFileGetCRC32C(full_filename, false); @@ -1991,7 +1991,7 @@ backup_files(void *arg) pgFile *prev_file = NULL; /* We have already copied all directories */ - if (S_ISDIR(file->mode)) + if (file->kind == PIO_KIND_DIRECTORY) continue; if (arguments->thread_num == 1) @@ -2046,9 +2046,9 @@ backup_files(void *arg) } /* Encountered some strange beast */ - if (!S_ISREG(file->mode)) - elog(WARNING, "Unexpected type %d of file \"%s\", skipping", - file->mode, from_fullpath); + if (file->kind != PIO_KIND_REGULAR) + elog(WARNING, "Unexpected type %s of file \"%s\", skipping", + pio_file_kind2str(file->kind, from_fullpath), from_fullpath); /* Check that file exist in previous backup */ if (current.backup_mode != BACKUP_MODE_FULL) @@ -2121,7 +2121,7 @@ parse_filelist_filenames(parray *files, const char *root) pgFile *file = (pgFile *) parray_get(files, i); int sscanf_result; - if (S_ISREG(file->mode) && + if (file->kind == PIO_KIND_REGULAR && path_is_prefix_of_path(PG_TBLSPC_DIR, file->rel_path)) { /* @@ -2148,7 +2148,7 @@ parse_filelist_filenames(parray *files, const char *root) } } - if (S_ISREG(file->mode) && file->tblspcOid != 0 && + if (file->kind == PIO_KIND_REGULAR && file->tblspcOid != 0 && file->name && file->name[0]) { if (file->forkName == init) @@ -2218,7 +2218,7 @@ set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i) if (strstr(prev_file->rel_path, cfs_tblspc_path) != NULL) { - if (S_ISREG(prev_file->mode) && prev_file->is_datafile) + if (prev_file->kind == PIO_KIND_REGULAR && prev_file->is_datafile) { elog(LOG, "Setting 'is_cfs' on file %s, name %s", prev_file->rel_path, prev_file->name); 
@@ -2375,7 +2375,7 @@ calculate_datasize_of_filelist(parray *filelist) if (file->external_dir_num != 0 || file->excluded) continue; - if (S_ISDIR(file->mode)) + if (file->kind == PIO_KIND_DIRECTORY) { // TODO is a dir always 4K? bytes += 4096; diff --git a/src/catalog.c b/src/catalog.c index a41e861cf..21a25f3d9 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -847,12 +847,22 @@ pgBackupGetBackupMode(pgBackup *backup, bool show_color) static bool IsDir(const char *dirpath, const char *entry, fio_location location) { + FOBJ_FUNC_ARP(); char path[MAXPGPATH]; - struct stat st; + pio_stat_t st; + err_i err; join_path_components(path, dirpath, entry); - return fio_stat(location, path, &st, false) == 0 && S_ISDIR(st.st_mode); + st = $i(pioStat, pioDriveForLocation(location), + .path = path, .follow_symlink = false, .err = &err); + if ($haserr(err)) + { + ft_logerr(FT_WARNING, $errmsg(err), "IsDir"); + return false; + } + + return st.pst_kind == PIO_KIND_DIRECTORY; } /* @@ -1074,6 +1084,7 @@ get_backup_filelist(pgBackup *backup, bool strict) char path[MAXPGPATH]; char linked[MAXPGPATH]; char compress_alg_string[MAXPGPATH]; + char kind[16]; int64 write_size, uncompressed_size, mode, /* bit length of mode_t depends on platforms */ @@ -1115,6 +1126,11 @@ get_backup_filelist(pgBackup *backup, bool strict) /* * Optional fields */ + if (get_control_value_str(buf, "kind", kind, sizeof(kind), false)) + file->kind = pio_str2file_kind(kind, path); + else /* fallback to mode for old backups */ + file->kind = pio_statmode2file_kind(file->mode, path); + if (get_control_value_str(buf, "linked", linked, sizeof(linked), false) && linked[0]) { file->linked = pgut_strdup(linked); @@ -1146,7 +1162,7 @@ get_backup_filelist(pgBackup *backup, bool strict) if (!file->is_datafile || file->is_cfs) file->size = file->uncompressed_size; - if (file->external_dir_num == 0 && S_ISREG(file->mode)) + if (file->external_dir_num == 0 && file->kind == PIO_KIND_REGULAR) { bool is_datafile = 
file->is_datafile; set_forkname(file); @@ -2564,14 +2580,14 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, if (file->write_size == FILE_NOT_FOUND) continue; - if (S_ISDIR(file->mode)) + if (file->kind == PIO_KIND_DIRECTORY) { backup_size_on_disk += 4096; uncompressed_size_on_disk += 4096; } /* Count the amount of the data actually copied */ - if (S_ISREG(file->mode) && file->write_size > 0) + if (file->kind == PIO_KIND_REGULAR && file->write_size > 0) { /* * Size of WAL files in 'pg_wal' is counted separately @@ -2587,11 +2603,13 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, } len = sprintf(line, "{\"path\":\"%s\", \"size\":\"" INT64_FORMAT "\", " - "\"mode\":\"%u\", \"is_datafile\":\"%u\", " + "\"kind\":\"%s\", \"mode\":\"%u\", \"is_datafile\":\"%u\", " "\"is_cfs\":\"%u\", \"crc\":\"%u\", " "\"compress_alg\":\"%s\", \"external_dir_num\":\"%d\", " "\"dbOid\":\"%u\"", - file->rel_path, file->write_size, file->mode, + file->rel_path, file->write_size, + pio_file_kind2str(file->kind, file->rel_path), + file->mode, file->is_datafile ? 1 : 0, file->is_cfs ? 
1 : 0, file->crc, diff --git a/src/catchup.c b/src/catchup.c index 7cdcc82cd..9f6779302 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -378,7 +378,7 @@ catchup_thread_runner(void *arg) pgFile *dest_file = NULL; /* We have already copied all directories */ - if (S_ISDIR(file->mode)) + if (file->kind == PIO_KIND_DIRECTORY) continue; if (file->excluded) @@ -400,9 +400,9 @@ catchup_thread_runner(void *arg) join_path_components(to_fullpath, arguments->to_root, file->rel_path); /* Encountered some strange beast */ - if (!S_ISREG(file->mode)) - elog(WARNING, "Unexpected type %d of file \"%s\", skipping", - file->mode, from_fullpath); + if (file->kind != PIO_KIND_REGULAR) + elog(WARNING, "Unexpected kind %s of file \"%s\", skipping", + pio_file_kind2str(file->kind, from_fullpath), from_fullpath); /* Check that file exist in dest pgdata */ if (arguments->backup_mode != BACKUP_MODE_FULL) @@ -546,7 +546,7 @@ catchup_sync_destination_files(const char* pgdata_path, fio_location location, p * - but PG itself is not relying on fs, its durable_sync * includes directory sync */ - if (S_ISDIR(file->mode) || file->excluded) + if (file->kind == PIO_KIND_DIRECTORY || file->excluded) continue; Assert(file->external_dir_num == 0); @@ -807,7 +807,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pgFile *file = (pgFile *) parray_get(source_filelist, i); char parent_dir[MAXPGPATH]; - if (!S_ISDIR(file->mode) || file->excluded) + if (file->kind != PIO_KIND_DIRECTORY || file->excluded) continue; /* diff --git a/src/checkdb.c b/src/checkdb.c index 2943665a2..bc3c60fc5 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -147,7 +147,7 @@ check_files(void *arg) elog(ERROR, "interrupted during checkdb"); /* No need to check directories */ - if (S_ISDIR(file->mode)) + if (file->kind == PIO_KIND_DIRECTORY) continue; if (!pg_atomic_test_set_flag(&file->lock)) @@ -161,7 +161,7 @@ check_files(void *arg) elog(INFO, "Progress: (%d/%d). 
Process file \"%s\"", i + 1, n_files_list, from_fullpath); - if (S_ISREG(file->mode)) + if (file->kind == PIO_KIND_REGULAR) { /* check only uncompressed by cfs datafiles */ if (file->is_datafile && !file->is_cfs) diff --git a/src/dir.c b/src/dir.c index f3dad2917..f245bcd37 100644 --- a/src/dir.c +++ b/src/dir.c @@ -145,23 +145,26 @@ pgFile * pgFileNew(const char *path, const char *rel_path, bool follow_symlink, int external_dir_num, fio_location location) { - struct stat st; + FOBJ_FUNC_ARP(); + pio_stat_t st; pgFile *file; + err_i err; /* stat the file */ - if (fio_stat(location, path, &st, follow_symlink) < 0) - { + st = $i(pioStat, pioDriveForLocation(location), .path = path, + .follow_symlink = follow_symlink, .err = &err); + if ($haserr(err)) { /* file not found is not an error case */ - if (errno == ENOENT) + if (getErrno(err) == ENOENT) return NULL; - elog(ERROR, "cannot stat file \"%s\": %s", path, - strerror(errno)); + ft_logerr(FT_FATAL, $errmsg(err), "pgFileNew"); } file = pgFileInit(rel_path); - file->size = st.st_size; - file->mode = st.st_mode; - file->mtime = st.st_mtime; + file->size = st.pst_size; + file->kind = st.pst_kind; + file->mode = st.pst_mode; + file->mtime = st.pst_mtime; file->external_dir_num = external_dir_num; return file; @@ -393,7 +396,7 @@ dir_list_file(parray *files, const char *root, bool handle_tablespaces, bool fol return; } - if (!S_ISDIR(file->mode)) + if (file->kind != PIO_KIND_DIRECTORY) { if (external_dir_num > 0) elog(ERROR, " --external-dirs option \"%s\": directory or symbolic link expected", @@ -435,7 +438,7 @@ dir_check_file(pgFile *file, bool backup_logs) in_tablespace = path_is_prefix_of_path(PG_TBLSPC_DIR, file->rel_path); /* Check if we need to exclude file by name */ - if (S_ISREG(file->mode)) + if (file->kind == PIO_KIND_REGULAR) { for (i = 0; pgdata_exclude_files[i]; i++) if (strcmp(file->rel_path, pgdata_exclude_files[i]) == 0) @@ -449,7 +452,7 @@ dir_check_file(pgFile *file, bool backup_logs) * If the 
directory name is in the exclude list, do not list the * contents. */ - else if (S_ISDIR(file->mode) && !in_tablespace && file->external_dir_num == 0) + else if (file->kind == PIO_KIND_DIRECTORY && !in_tablespace && file->external_dir_num == 0) { /* * If the item in the exclude list starts with '/', compare to @@ -478,14 +481,14 @@ dir_check_file(pgFile *file, bool backup_logs) } /* Do not backup ptrack_init files */ - if (S_ISREG(file->mode) && strcmp(file->name, "ptrack_init") == 0) + if (file->kind == PIO_KIND_REGULAR && strcmp(file->name, "ptrack_init") == 0) return CHECK_FALSE; /* * Check files located inside database directories including directory * 'global' */ - if (S_ISREG(file->mode) && file->tblspcOid != 0 && + if (file->kind == PIO_KIND_REGULAR && file->tblspcOid != 0 && file->name && file->name[0]) { if (strcmp(file->name, "pg_internal.init") == 0) @@ -607,7 +610,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, struct dirent *dent; bool in_tablespace = false; - if (!S_ISDIR(parent->mode)) + if (parent->kind != PIO_KIND_DIRECTORY) elog(ERROR, "\"%s\" is not a directory", parent_dir); in_tablespace = path_is_prefix_of_path(PG_TBLSPC_DIR, parent->rel_path); @@ -641,7 +644,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, continue; /* Skip entries point current dir or parent dir */ - if (S_ISDIR(file->mode) && + if (file->kind == PIO_KIND_DIRECTORY && (strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0)) { pgFileFree(file); @@ -659,7 +662,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, * Add only files, directories and links. Skip sockets and other * unexpected file formats. 
*/ - if (!S_ISDIR(file->mode) && !S_ISREG(file->mode)) + if (file->kind != PIO_KIND_DIRECTORY && file->kind != PIO_KIND_REGULAR) { elog(WARNING, "Skip '%s': unexpected file format", child); pgFileFree(file); @@ -671,7 +674,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, * Do not copy tablespaces twice. It may happen if the tablespace is located * inside the PGDATA. */ - if (S_ISDIR(file->mode) && + if (file->kind == PIO_KIND_DIRECTORY && strcmp(file->name, TABLESPACE_VERSION_DIRECTORY) == 0) { Oid tblspcOid; @@ -718,7 +721,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, * If the entry is a directory call dir_list_file_internal() * recursively. */ - if (S_ISDIR(file->mode)) + if (file->kind == PIO_KIND_DIRECTORY) dir_list_file_internal(files, file, child, handle_tablespaces, follow_symlink, backup_logs, skip_hidden, external_dir_num, location); } @@ -886,7 +889,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba { pgFile *file = (pgFile *) parray_get(dest_files, i); - if (!S_ISDIR(file->mode)) + if (file->kind != PIO_KIND_DIRECTORY) continue; /* skip external directory content */ @@ -919,7 +922,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba char parent_dir[MAXPGPATH]; pgFile *dir = (pgFile *) parray_get(dest_files, i); - if (!S_ISDIR(dir->mode)) + if (dir->kind != PIO_KIND_DIRECTORY) continue; /* skip external directory content */ @@ -1523,14 +1526,13 @@ dir_is_empty(const char *path, fio_location location) bool fileExists(const char *path, fio_location location) { - struct stat buf; + FOBJ_FUNC_ARP(); + err_i err; + bool exists; - if (fio_stat(location, path, &buf, true) == -1 && errno == ENOENT) - return false; - else if (!S_ISREG(buf.st_mode)) - return false; - else - return true; + exists = $i(pioExists, pioDriveForLocation(location), path, &err); + + return exists; } /* diff --git a/src/fetch.c b/src/fetch.c index 
980bf531b..bce8dacc4 100644 --- a/src/fetch.c +++ b/src/fetch.c @@ -28,9 +28,10 @@ slurpFile(fio_location location, const char *datadir, const char *path, size_t * { int fd; char *buffer; - struct stat statbuf; + pio_stat_t statbuf; char fullpath[MAXPGPATH]; - int len; + size_t len; + err_i err; join_path_components(fullpath, datadir, path); @@ -43,16 +44,21 @@ slurpFile(fio_location location, const char *datadir, const char *path, size_t * fullpath, strerror(errno)); } - if (fio_stat(location, fullpath, &statbuf, true) < 0) + statbuf = $i(pioStat, pioDriveForLocation(location), .path = fullpath, + .follow_symlink = true, .err = &err); + if ($haserr(err)) { if (safe) return NULL; else - elog(ERROR, "Could not stat file \"%s\": %s", - fullpath, strerror(errno)); + ft_logerr(FT_FATAL, $errmsg(err), "slurpFile"); } - len = statbuf.st_size; + if (statbuf.pst_size > SIZE_MAX) + ft_log(FT_FATAL, "file \"%s\" is too large: %lld", + fullpath, (long long)statbuf.pst_size); + + len = statbuf.pst_size; buffer = pg_malloc(len + 1); if (fio_read(fd, buffer, len) != len) diff --git a/src/merge.c b/src/merge.c index a03cd2209..5d787e567 100644 --- a/src/merge.c +++ b/src/merge.c @@ -639,7 +639,7 @@ merge_chain(InstanceState *instanceState, pgFile *file = (pgFile *) parray_get(dest_backup->files, i); /* if the entry was an external directory, create it in the backup */ - if (file->external_dir_num && S_ISDIR(file->mode)) + if (file->external_dir_num && file->kind == PIO_KIND_DIRECTORY) { char dirpath[MAXPGPATH]; char new_container[MAXPGPATH]; @@ -955,6 +955,7 @@ merge_files(void *arg) continue; tmp_file = pgFileInit(dest_file->rel_path); + tmp_file->kind = dest_file->kind; tmp_file->mode = dest_file->mode; tmp_file->is_datafile = dest_file->is_datafile; tmp_file->is_cfs = dest_file->is_cfs; @@ -962,7 +963,7 @@ merge_files(void *arg) tmp_file->dbOid = dest_file->dbOid; /* Directories were created before */ - if (S_ISDIR(dest_file->mode)) + if (dest_file->kind == PIO_KIND_DIRECTORY) 
goto done; elog(progress ? INFO : LOG, "Progress: (%d/%zu). Merging file \"%s\"", diff --git a/src/pg_probackup.c b/src/pg_probackup.c index eaeabd2bc..32d28c004 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -291,6 +291,7 @@ main(int argc, char *argv[]) { char *command = NULL; ProbackupSubcmd backup_subcmd = NO_CMD; + err_i err; ft_init_log(elog_ft_log); fobj_init(); @@ -536,13 +537,17 @@ main(int argc, char *argv[]) if (backup_subcmd != INIT_CMD && backup_subcmd != ADD_INSTANCE_CMD && backup_subcmd != ARCHIVE_GET_CMD) { - struct stat st; + pio_stat_t st; - if (fio_stat(FIO_BACKUP_HOST, instanceState->instance_backup_subdir_path, - &st, true) != 0) + st = $i(pioStat, pioDriveForLocation(FIO_BACKUP_HOST), + .path = instanceState->instance_backup_subdir_path, + .follow_symlink = true, + .err = &err); + + if ($haserr(err)) { - elog(WARNING, "Failed to access directory \"%s\": %s", - instanceState->instance_backup_subdir_path, strerror(errno)); + ft_logerr(FT_WARNING, $errmsg(err), "Failed to access directory \"%s\"", + instanceState->instance_backup_subdir_path); // TODO: redundant message, should we get rid of it? 
elog(ERROR, "Instance '%s' does not exist in this backup catalog", @@ -551,7 +556,7 @@ main(int argc, char *argv[]) else { /* Ensure that backup_path is a path to a directory */ - if (!S_ISDIR(st.st_mode)) + if (st.pst_kind != PIO_KIND_DIRECTORY) elog(ERROR, "-B, --backup-path must be a path to directory"); } } @@ -896,7 +901,8 @@ main(int argc, char *argv[]) */ char *stripped_wal_file_path = pgut_str_strip_trailing_filename(wal_file_path, wal_file_name); join_path_components(archive_push_xlog_dir, instance_config.pgdata, XLOGDIR); - if (fio_is_same_file(FIO_DB_HOST, stripped_wal_file_path, archive_push_xlog_dir, true)) + if ($i(pioFilesAreSame, pioDriveForLocation(FIO_DB_HOST), + .file1 = stripped_wal_file_path, .file2 = archive_push_xlog_dir)) { /* 2nd case */ system_id = get_system_identifier(FIO_DB_HOST, instance_config.pgdata, false); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 41f888d18..4f8ba7376 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -211,18 +211,21 @@ typedef enum ForkName typedef struct pgFile { char *name; /* file or directory name */ - mode_t mode; /* protection (file type and permission) */ - size_t size; /* size of the file */ - time_t mtime; /* file st_mtime attribute, can be used only - during backup */ - size_t read_size; /* size of the portion read (if only some pages are + + pio_file_kind_e kind; /* kind of file */ + uint32_t mode; /* protection (permission) */ + uint64_t size; /* size of the file */ + + uint64_t read_size; /* size of the portion read (if only some pages are backed up, it's different from size) */ - int64 write_size; /* size of the backed-up file. BYTES_INVALID means + int64_t write_size; /* size of the backed-up file. BYTES_INVALID means that the file existed but was not backed up because not modified since last backup. 
*/ - size_t uncompressed_size; /* size of the backed-up file before compression + uint64_t uncompressed_size; /* size of the backed-up file before compression * and adding block headers. */ + time_t mtime; /* file st_mtime attribute, can be used only + during backup */ /* we need int64 here to store '-1' value */ pg_crc32 crc; /* CRC value of the file, regular file only */ char *rel_path; /* relative path of the file */ @@ -1022,7 +1025,6 @@ extern bool backup_contains_external(const char *dir, parray *dirs_list); extern bool dir_is_empty(const char *path, fio_location location); extern bool fileExists(const char *path, fio_location location); -extern size_t pgFileSize(const char *path); extern pgFile *pgFileNew(const char *path, const char *rel_path, bool follow_symlink, int external_dir_num, diff --git a/src/restore.c b/src/restore.c index 6e8b4e10e..428a0e1cd 100644 --- a/src/restore.c +++ b/src/restore.c @@ -837,11 +837,11 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, { pgFile *file = (pgFile *) parray_get(dest_files, i); - if (S_ISDIR(file->mode)) + if (file->kind == PIO_KIND_DIRECTORY) total_bytes += 4096; if (!params->skip_external_dirs && - file->external_dir_num && S_ISDIR(file->mode)) + file->external_dir_num && file->kind == PIO_KIND_DIRECTORY) { char *external_path; char dirpath[MAXPGPATH]; @@ -926,7 +926,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, redundant = true; /* do not delete the useful internal directories */ - if (S_ISDIR(file->mode) && !redundant) + if (file->kind == PIO_KIND_DIRECTORY && !redundant) continue; /* if file does not exists in destination list, then we can safely unlink it */ @@ -1056,7 +1056,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, char to_fullpath[MAXPGPATH]; pgFile *dest_file = (pgFile *) parray_get(dest_files, i); - if (S_ISDIR(dest_file->mode)) + if (dest_file->kind == PIO_KIND_DIRECTORY) continue; /* skip external files if ordered to do so */ @@ -1137,7 +1137,7 @@ 
restore_files(void *arg) pgFile *dest_file = (pgFile *) parray_get(arguments->dest_files, i); /* Directories were created before */ - if (S_ISDIR(dest_file->mode)) + if (dest_file->kind == PIO_KIND_DIRECTORY) continue; if (!pg_atomic_test_set_flag(&dest_file->lock)) @@ -1543,13 +1543,14 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, char path[MAXPGPATH]; FILE *fp = NULL; FILE *fp_tmp = NULL; - struct stat st; + pio_stat_t st; char current_time_str[100]; /* postgresql.auto.conf parsing */ char line[16384] = "\0"; char *buf = NULL; int buf_len = 0; int buf_len_max = 16384; + err_i err; elog(LOG, "update recovery settings in postgresql.auto.conf"); @@ -1557,17 +1558,16 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, join_path_components(postgres_auto_path, instance_config.pgdata, "postgresql.auto.conf"); - if (fio_stat(FIO_DB_HOST, postgres_auto_path, &st, false) < 0) + st = $i(pioStat, pioDriveForLocation(FIO_DB_HOST), + .path = postgres_auto_path, .follow_symlink = false, .err = &err); + /* file not found is not an error case */ + if ($haserr(err) && getErrno(err) != ENOENT) { - /* file not found is not an error case */ - if (errno != ENOENT) - elog(ERROR, "cannot stat file \"%s\": %s", postgres_auto_path, - strerror(errno)); - st.st_size = 0; + ft_logerr(FT_FATAL, $errmsg(err), ""); } /* Kludge for 0-sized postgresql.auto.conf file. 
TODO: make something more intelligent */ - if (st.st_size > 0) + if (st.pst_size > 0) { fp = fio_open_stream(FIO_DB_HOST, postgres_auto_path); if (fp == NULL) diff --git a/src/utils/file.c b/src/utils/file.c index df305c73a..eae68201f 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -51,6 +51,7 @@ typedef struct { typedef struct { + pio_file_kind_e kind; mode_t mode; size_t size; time_t mtime; @@ -258,6 +259,110 @@ fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size) IO_CHECK(fio_read_all(fio_stdin, payload_buf, hdr.size), hdr.size); } +pio_file_kind_e +pio_statmode2file_kind(mode_t mode, const char* path) +{ + pio_file_kind_e kind; + if (S_ISREG(mode)) + kind = PIO_KIND_REGULAR; + else if (S_ISDIR(mode)) + kind = PIO_KIND_DIRECTORY; +#ifdef S_ISLNK + else if (S_ISLNK(mode)) + kind = PIO_KIND_SYMLINK; +#endif +#ifdef S_ISFIFO + else if (S_ISFIFO(mode)) + kind = PIO_KIND_FIFO; +#endif +#ifdef S_ISSOCK + else if (S_ISFIFO(mode)) + kind = PIO_KIND_SOCK; +#endif +#ifdef S_ISCHR + else if (S_ISCHR(mode)) + kind = PIO_KIND_CHARDEV; +#endif +#ifdef S_ISBLK + else if (S_ISBLK(mode)) + kind = PIO_KIND_BLOCKDEV; +#endif + else + elog(ERROR, "Unsupported file mode kind \"%x\" for file '%s'", + mode, path); + return kind; +} + +pio_file_kind_e +pio_str2file_kind(const char* str, const char* path) +{ + pio_file_kind_e kind; + if (strncmp(str, "reg", 3) == 0) + kind = PIO_KIND_REGULAR; + else if (strncmp(str, "dir", 3) == 0) + kind = PIO_KIND_DIRECTORY; + else if (strncmp(str, "sym", 3) == 0) + kind = PIO_KIND_SYMLINK; + else if (strncmp(str, "fifo", 4) == 0) + kind = PIO_KIND_FIFO; + else if (strncmp(str, "sock", 4) == 0) + kind = PIO_KIND_SOCK; + else if (strncmp(str, "chdev", 5) == 0) + kind = PIO_KIND_CHARDEV; + else if (strncmp(str, "bldev", 5) == 0) + kind = PIO_KIND_BLOCKDEV; + else + elog(ERROR, "Unknown file kind \"%s\" for file '%s'", + str, path); + return kind; +} + +const char* +pio_file_kind2str(pio_file_kind_e kind, const char 
*path) +{ + switch (kind) + { + case PIO_KIND_REGULAR: + return "reg"; + case PIO_KIND_DIRECTORY: + return "dir"; + case PIO_KIND_SYMLINK: + return "sym"; + case PIO_KIND_FIFO: + return "fifo"; + case PIO_KIND_SOCK: + return "sock"; + case PIO_KIND_CHARDEV: + return "chdev"; + case PIO_KIND_BLOCKDEV: + return "bldev"; + default: + elog(ERROR, "Unknown file kind \"%d\" for file '%s'", + kind, path); + } + return NULL; +} + +#ifndef S_ISGID +#define S_ISGID 0 +#endif +#ifndef S_ISUID +#define S_ISUID 0 +#endif +#ifndef S_ISVTX +#define S_ISVTX 0 +#endif + +mode_t +pio_limit_mode(mode_t mode) +{ + if (S_ISDIR(mode)) + mode &= 0x1ff | S_ISGID | S_ISUID | S_ISVTX; + else + mode &= 0x1ff; + return mode; +} + /* Open input stream. Remote file is fetched to the in-memory buffer and then accessed through Linux fmemopen */ FILE* fio_open_stream(fio_location location, const char* path) @@ -1094,66 +1199,6 @@ fio_read(int fd, void* buf, size_t size) } } -/* Get information about file */ -int -fio_stat(fio_location location, const char* path, struct stat* st, bool follow_symlink) -{ - if (fio_is_remote(location)) - { - fio_header hdr = { - .cop = FIO_STAT, - .handle = -1, - .size = strlen(path) + 1, - .arg = follow_symlink, - }; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); - - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - Assert(hdr.cop == FIO_STAT); - IO_CHECK(fio_read_all(fio_stdin, st, sizeof(*st)), sizeof(*st)); - - if (hdr.arg != 0) - { - errno = hdr.arg; - return -1; - } - return 0; - } - else - { - return follow_symlink ? 
stat(path, st) : lstat(path, st); - } -} - -/* - * Compare, that filename1 and filename2 is the same file - * in windows compare only filenames - */ -bool -fio_is_same_file(fio_location location, const char* filename1, const char* filename2, bool follow_symlink) -{ -#ifndef WIN32 - struct stat stat1, stat2; - - if (fio_stat(location, filename1, &stat1, follow_symlink) < 0) - elog(ERROR, "Can't stat file \"%s\": %s", filename1, strerror(errno)); - - if (fio_stat(location, filename2, &stat2, follow_symlink) < 0) - elog(ERROR, "Can't stat file \"%s\": %s", filename2, strerror(errno)); - - return stat1.st_ino == stat2.st_ino && stat1.st_dev == stat2.st_dev; -#else - char *abs_name1 = make_absolute_path(filename1); - char *abs_name2 = make_absolute_path(filename2); - bool result = strcmp(abs_name1, abs_name2) == 0; - free(abs_name2); - free(abs_name1); - return result; -#endif -} - /* * Read value of a symbolic link * this is a wrapper about readlink() syscall @@ -3263,6 +3308,7 @@ fio_list_dir_impl(int out, char* buf) fio_pgFile fio_file; pgFile *file = (pgFile *) parray_get(file_files, i); + fio_file.kind = file->kind; fio_file.mode = file->mode; fio_file.size = file->size; fio_file.mtime = file->mtime; @@ -3552,10 +3598,16 @@ fio_communicate(int in, int out) size_t buf_size = 128*1024; char* buf = (char*)pgut_malloc(buf_size); fio_header hdr; - struct stat st; + pioDrive_i drive; + pio_stat_t st; int rc; int tmp_fd; pg_crc32 crc; + err_i err = $noerr(); + + FOBJ_FUNC_ARP(); + + drive = pioDriveForLocation(FIO_LOCAL_HOST); #ifdef WIN32 SYS_CHECK(setmode(in, _O_BINARY)); @@ -3564,6 +3616,7 @@ fio_communicate(int in, int out) /* Main loop until end of processing all master commands */ while ((rc = fio_read_all(in, &hdr, sizeof hdr)) == sizeof(hdr)) { + FOBJ_LOOP_ARP(); if (hdr.size != 0) { if (hdr.size > buf_size) { /* Extend buffer on demand */ @@ -3656,11 +3709,17 @@ fio_communicate(int in, int out) } case FIO_STAT: /* Get information about file with specified path */ 
hdr.size = sizeof(st); - rc = hdr.arg ? stat(buf, &st) : lstat(buf, &st); - hdr.arg = rc < 0 ? errno : 0; + st = $i(pioStat, drive, buf, .follow_symlink = hdr.arg != 0, + .err = &err); + hdr.arg = $haserr(err) ? getErrno(err) : 0; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(out, &st, sizeof(st)), sizeof(st)); break; + case FIO_FILES_ARE_SAME: + hdr.arg = (int)$i(pioFilesAreSame, drive, buf, buf+strlen(buf)+1); + hdr.size = 0; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + break; case FIO_ACCESS: /* Check presence of file with specified name */ hdr.size = 0; hdr.arg = access(buf, hdr.arg) < 0 ? errno : 0; @@ -3903,7 +3962,7 @@ pioFile_fobjDispose(VSelf) static bool common_pioExists(fobj_t self, path_t path, err_i *err) { - struct stat buf; + pio_stat_t buf; fobj_reset_err(err); /* follow symlink ? */ @@ -3913,7 +3972,7 @@ common_pioExists(fobj_t self, path_t path, err_i *err) *err = $noerr(); return false; } - if ($noerr(*err) && !S_ISREG(buf.st_mode)) + if ($noerr(*err) && buf.pst_kind != PIO_KIND_REGULAR) *err = $err(SysErr, "File {path:q} is not regular", path(path)); if ($haserr(*err)) { *err = $syserr(getErrno(*err), "Could not check file existance: {cause:$M}", @@ -3947,17 +4006,52 @@ pioLocalDrive_pioOpen(VSelf, path_t path, int flags, return bind_pioFile(file); } -static struct stat +static pio_stat_t pioLocalDrive_pioStat(VSelf, path_t path, bool follow_symlink, err_i *err) { struct stat st = {0}; + pio_stat_t pst = {0}; int r; fobj_reset_err(err); r = follow_symlink ? 
stat(path, &st) : lstat(path, &st); if (r < 0) *err = $syserr(errno, "Cannot stat file {path:q}", path(path)); - return st; + else + { + pst.pst_kind = pio_statmode2file_kind(st.st_mode, path); + pst.pst_mode = pio_limit_mode(st.st_mode); + pst.pst_size = st.st_size; + pst.pst_mtime = st.st_mtime; + } + return pst; +} + +/* + * Compare, that filename1 and filename2 is the same file + * in windows compare only filenames + */ +static bool +pioLocalDrive_pioFilesAreSame(VSelf, path_t file1, path_t file2) +{ +#ifndef WIN32 + struct stat stat1, stat2; + + if (stat(file1, &stat1) < 0) + elog(ERROR, "Can't stat file \"%s\": %s", file1, strerror(errno)); + + if (stat(file2, &stat2) < 0) + elog(ERROR, "Can't stat file \"%s\": %s", file1, strerror(errno)); + + return stat1.st_ino == stat2.st_ino && stat1.st_dev == stat2.st_dev; +#else + char *abs_name1 = make_absolute_path(file1); + char *abs_name2 = make_absolute_path(file2); + bool result = strcmp(abs_name1, abs_name2) == 0; + free(abs_name2); + free(abs_name1); + return result; +#endif } #define pioLocalDrive_pioExists common_pioExists @@ -4213,10 +4307,10 @@ pioRemoteDrive_pioOpen(VSelf, path_t path, return bind_pioFile(file); } -static struct stat +static pio_stat_t pioRemoteDrive_pioStat(VSelf, path_t path, bool follow_symlink, err_i *err) { - struct stat st = {0}; + pio_stat_t st = {0}; fio_header hdr = { .cop = FIO_STAT, .handle = -1, @@ -4240,6 +4334,32 @@ pioRemoteDrive_pioStat(VSelf, path_t path, bool follow_symlink, err_i *err) return st; } +static bool +pioRemoteDrive_pioFilesAreSame(VSelf, path_t file1, path_t file2) +{ + fio_header hdr = { + .cop = FIO_FILES_ARE_SAME, + .handle = -1, + .arg = 0, + }; + char _buf[512]; + ft_strbuf_t buf = ft_strbuf_init_stack(_buf, sizeof(_buf)); + ft_strbuf_catc(&buf, file1); + ft_strbuf_cat1(&buf, '\x00'); + ft_strbuf_catc(&buf, file2); + hdr.size = buf.len + 1; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, 
buf.ptr, buf.len+1), buf.len+1); + + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + ft_dbg_assert(hdr.cop == FIO_FILES_ARE_SAME); + + ft_strbuf_free(&buf); + + return hdr.arg == 1; +} + #define pioRemoteDrive_pioExists common_pioExists static err_i @@ -4388,6 +4508,7 @@ pioRemoteDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_ta /* receive metainformation */ IO_CHECK(fio_read_all(fio_stdin, &fio_file, sizeof(fio_file)), sizeof(fio_file)); + file->kind = fio_file.kind; file->mode = fio_file.mode; file->size = fio_file.size; file->mtime = fio_file.mtime; diff --git a/src/utils/file.h b/src/utils/file.h index 5aa26c21e..991bfe244 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -67,7 +67,8 @@ typedef enum FIO_READLINK, FIO_SYNC_FILE, FIO_SEND_FILE_CONTENT, - FIO_PAGE_ZERO + FIO_PAGE_ZERO, + FIO_FILES_ARE_SAME, } fio_operations; typedef struct @@ -92,6 +93,24 @@ typedef enum FIO_REMOTE_HOST /* date is located at remote host */ } fio_location; +typedef enum pio_file_kind { + PIO_KIND_UNKNOWN = 0, + PIO_KIND_REGULAR = 1, + PIO_KIND_DIRECTORY = 2, + PIO_KIND_SYMLINK = 3, + PIO_KIND_FIFO = 4, + PIO_KIND_SOCK = 5, + PIO_KIND_CHARDEV = 6, + PIO_KIND_BLOCKDEV = 7, +} pio_file_kind_e; + +typedef struct pio_stat { + uint64_t pst_size; + int64_t pst_mtime; + uint32_t pst_mode; + pio_file_kind_e pst_kind; +} pio_stat_t; + extern fio_location MyLocation; extern void setMyLocation(ProbackupSubcmd const subcmd); @@ -125,7 +144,6 @@ extern int fio_check_error_fd_gz(gzFile f, char **errmsg); extern ssize_t fio_read(int fd, void* buf, size_t size); extern int fio_flush(int fd); extern int fio_seek(int fd, off_t offs); -extern int fio_fstat(int fd, struct stat* st); extern int fio_truncate(int fd, off_t size); extern int fio_close(int fd); @@ -142,7 +160,6 @@ extern int fio_fflush(FILE* f); extern int fio_fseek(FILE* f, off_t offs); extern int fio_ftruncate(FILE* f, off_t size); extern int fio_fclose(FILE* f); -extern int 
fio_ffstat(FILE* f, struct stat* st); extern FILE* fio_open_stream(fio_location location, const char* name); extern int fio_close_stream(FILE* f); @@ -175,8 +192,6 @@ extern int fio_symlink(fio_location location, const char* target, const char extern int fio_remove(fio_location location, const char* path, bool missing_ok); extern int fio_chmod(fio_location location, const char* path, int mode); extern int fio_access(fio_location location, const char* path, int mode); -extern int fio_stat(fio_location location, const char* path, struct stat* st, bool follow_symlinks); -extern bool fio_is_same_file(fio_location location, const char* filename1, const char* filename2, bool follow_symlink); extern ssize_t fio_readlink(fio_location location, const char *path, char *value, size_t valsiz); extern pid_t fio_check_postmaster(fio_location location, const char *pgdata); @@ -198,6 +213,10 @@ extern pg_crc32 pgFileGetCRC32(const char *file_path, bool missing_ok); #endif extern pg_crc32 pgFileGetCRC32Cgz(const char *file_path, bool missing_ok); +extern pio_file_kind_e pio_statmode2file_kind(mode_t mode, const char* path); +extern pio_file_kind_e pio_str2file_kind(const char* str, const char* path); +extern const char* pio_file_kind2str(pio_file_kind_e kind, const char* path); +extern mode_t pio_limit_mode(mode_t mode); // OBJECTS @@ -237,19 +256,19 @@ fobj_iface(pioWriteFlush); fobj_iface(pioWriteCloser); fobj_iface(pioReadCloser); -typedef struct stat stat_t; - // Drive #define mth__pioOpen pioFile_i, (path_t, path), (int, flags), \ (int, permissions), (err_i *, err) #define mth__pioOpen__optional() (permissions, FILE_PERMISSION) -#define mth__pioStat stat_t, (path_t, path), (bool, follow_symlink), \ +#define mth__pioStat pio_stat_t, (path_t, path), (bool, follow_symlink), \ (err_i *, err) #define mth__pioRemove err_i, (path_t, path), (bool, missing_ok) #define mth__pioRename err_i, (path_t, old_path), (path_t, new_path) #define mth__pioExists bool, (path_t, path), (err_i *, 
err) #define mth__pioGetCRC32 pg_crc32, (path_t, path), (bool, compressed), \ (err_i *, err) +/* Compare, that filename1 and filename2 is the same file */ +#define mth__pioFilesAreSame bool, (path_t, file1), (path_t, file2) #define mth__pioIsRemote bool #define mth__pioMakeDir err_i, (path_t, path), (mode_t, mode), (bool, strict) #define mth__pioListDir void, (parray *, files), (const char *, root), \ @@ -265,12 +284,14 @@ fobj_method(pioExists); fobj_method(pioIsRemote); fobj_method(pioGetCRC32); fobj_method(pioMakeDir); +fobj_method(pioFilesAreSame); fobj_method(pioListDir); fobj_method(pioRemoveDir); #define iface__pioDrive mth(pioOpen, pioStat, pioRemove, pioRename), \ mth(pioExists, pioGetCRC32, pioIsRemote), \ - mth(pioMakeDir, pioListDir, pioRemoveDir) + mth(pioMakeDir, pioListDir, pioRemoveDir), \ + mth(pioFilesAreSame) fobj_iface(pioDrive); extern pioDrive_i pioDriveForLocation(fio_location location); diff --git a/src/validate.c b/src/validate.c index 84a27adcb..8adeb52a9 100644 --- a/src/validate.c +++ b/src/validate.c @@ -55,6 +55,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) pthread_t *threads; validate_files_arg *threads_args; int i; + err_i err; // parray *dbOid_exclude_list = NULL; /* Check backup program version */ @@ -199,14 +200,16 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) (parse_program_version(backup->program_version) == 20201))) { char path[MAXPGPATH]; - struct stat st; + pio_stat_t st; join_path_components(path, backup->root_dir, DATABASE_FILE_LIST); - if (fio_stat(FIO_BACKUP_HOST, path, &st, true) < 0) - elog(ERROR, "Cannot stat file \"%s\": %s", path, strerror(errno)); + st = $i(pioStat, pioDriveForLocation(FIO_BACKUP_HOST), + .path = path, .follow_symlink = true, .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), ""); - if (st.st_size >= (BLCKSZ*500)) + if (st.pst_size >= (BLCKSZ*500)) { elog(WARNING, "Backup %s is a victim of metadata corruption. 
" "Additional information can be found here: " @@ -242,7 +245,7 @@ pgBackupValidateFiles(void *arg) elog(ERROR, "Interrupted during validate"); /* Validate only regular files */ - if (!S_ISREG(file->mode)) + if (file->kind != PIO_KIND_REGULAR) continue; /* From 0e24b62a73c00277036e72f2870e175574e859c7 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 17 Nov 2022 01:45:21 +0300 Subject: [PATCH 087/339] [PBCKP-345] more consistent use of int64_t for file sizes --- src/archive.c | 2 +- src/catchup.c | 6 +++--- src/data.c | 19 ++++++++++++------- src/delete.c | 6 +++--- src/fetch.c | 2 +- src/pg_probackup.h | 10 +++++----- src/show.c | 2 +- src/util.c | 6 +++--- src/utils/file.c | 3 ++- src/utils/file.h | 2 +- 10 files changed, 32 insertions(+), 26 deletions(-) diff --git a/src/archive.c b/src/archive.c index abc04e86e..1c1b5e419 100644 --- a/src/archive.c +++ b/src/archive.c @@ -390,7 +390,7 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, /* partial handling */ pio_stat_t st; int partial_try_count = 0; - ssize_t partial_file_size = 0; + int64_t partial_file_size = 0; bool partial_is_stale = true; size_t len; err_i err = $noerr(); diff --git a/src/catchup.c b/src/catchup.c index 9f6779302..038767470 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -439,7 +439,7 @@ catchup_thread_runner(void *arg) if (file->write_size == BYTES_INVALID) { - elog(LOG, "Skipping the unchanged file: \"%s\", read %zu bytes", from_fullpath, file->read_size); + elog(LOG, "Skipping the unchanged file: \"%s\", read %lld bytes", from_fullpath, (long long)file->read_size); continue; } @@ -630,8 +630,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* for fancy reporting */ time_t start_time, end_time; - ssize_t transfered_datafiles_bytes = 0; - ssize_t transfered_walfiles_bytes = 0; + int64_t transfered_datafiles_bytes = 0; + int64_t transfered_walfiles_bytes = 0; char pretty_source_bytes[20]; err_i err = $noerr(); diff --git 
a/src/data.c b/src/data.c index d9eaf0807..bc1f18097 100644 --- a/src/data.c +++ b/src/data.c @@ -507,7 +507,8 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat * NOTE This is a normal situation, if the file size has changed * since the moment we computed it. */ - file->n_blocks = file->size/BLCKSZ; + file->n_blocks = (typeof(file->n_blocks))(file->size/BLCKSZ); + Assert((int64_t)file->n_blocks * BLCKSZ == file->size); /* * Skip unchanged file only if it exists in previous backup. @@ -611,12 +612,15 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat elog(ERROR, "Cannot read file \"%s\"", from_fullpath); } - file->read_size = rc * BLCKSZ; + file->read_size = (int64_t)rc * BLCKSZ; /* refresh n_blocks for FULL and DELTA */ if (backup_mode == BACKUP_MODE_FULL || backup_mode == BACKUP_MODE_DIFF_DELTA) - file->n_blocks = file->read_size / BLCKSZ; + { + file->n_blocks = (typeof(file->n_blocks))(file->read_size / BLCKSZ); + Assert((int64_t)file->n_blocks * BLCKSZ == file->read_size); + } /* Determine that file didn`t changed in case of incremental backup */ if (backup_mode != BACKUP_MODE_FULL && @@ -650,7 +654,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat void catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, XLogRecPtr sync_lsn, BackupMode backup_mode, - uint32 checksum_version, size_t prev_size) + uint32 checksum_version, int64_t prev_size) { int rc; bool use_pagemap; @@ -760,7 +764,7 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa elog(ERROR, "Cannot read file \"%s\"", from_fullpath); } - file->read_size = rc * BLCKSZ; + file->read_size = (int64_t)rc * BLCKSZ; /* Determine that file didn`t changed in case of incremental catchup */ if (backup_mode != BACKUP_MODE_FULL && @@ -1595,7 +1599,8 @@ check_data_file(ConnectionArgs *arguments, pgFile *file, * NOTE This is a normal situation, if the file 
size has changed * since the moment we computed it. */ - nblocks = file->size/BLCKSZ; + nblocks = (typeof(nblocks))(file->size/BLCKSZ); + Assert((int64_t)nblocks * BLCKSZ == file->size); for (blknum = 0; blknum < nblocks; blknum++) { @@ -2275,7 +2280,7 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, elog(ERROR, "Cannot seek to end of file position in destination file \"%s\": %s", to_fullpath, strerror(errno)); { - long pos = ftell(out); + int64_t pos = ftell(out); if (pos < 0) elog(ERROR, "Cannot get position in destination file \"%s\": %s", diff --git a/src/delete.c b/src/delete.c index b6ed23966..9873cb163 100644 --- a/src/delete.c +++ b/src/delete.c @@ -794,8 +794,8 @@ delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timeli char first_to_del_str[MAXFNAMELEN]; char oldest_to_keep_str[MAXFNAMELEN]; int i; - size_t wal_size_logical = 0; - size_t wal_size_actual = 0; + int64_t wal_size_logical = 0; + int64_t wal_size_actual = 0; char wal_pretty_size[20]; bool purge_all = false; @@ -837,7 +837,7 @@ delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timeli /* sanity */ if (OldestToKeepSegNo > FirstToDeleteSegNo) { - wal_size_logical = (OldestToKeepSegNo - FirstToDeleteSegNo) * xlog_seg_size; + wal_size_logical = (int64_t)(OldestToKeepSegNo - FirstToDeleteSegNo) * xlog_seg_size; /* In case of 'purge all' scenario OldestToKeepSegNo will be deleted too */ if (purge_all) diff --git a/src/fetch.c b/src/fetch.c index bce8dacc4..d283af129 100644 --- a/src/fetch.c +++ b/src/fetch.c @@ -54,7 +54,7 @@ slurpFile(fio_location location, const char *datadir, const char *path, size_t * ft_logerr(FT_FATAL, $errmsg(err), "slurpFile"); } - if (statbuf.pst_size > SIZE_MAX) + if (statbuf.pst_size > SSIZE_MAX) ft_log(FT_FATAL, "file \"%s\" is too large: %lld", fullpath, (long long)statbuf.pst_size); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 4f8ba7376..f4571707d 100644 --- a/src/pg_probackup.h +++ 
b/src/pg_probackup.h @@ -214,14 +214,14 @@ typedef struct pgFile pio_file_kind_e kind; /* kind of file */ uint32_t mode; /* protection (permission) */ - uint64_t size; /* size of the file */ + int64_t size; /* size of the file */ - uint64_t read_size; /* size of the portion read (if only some pages are + int64_t read_size; /* size of the portion read (if only some pages are backed up, it's different from size) */ int64_t write_size; /* size of the backed-up file. BYTES_INVALID means that the file existed but was not backed up because not modified since last backup. */ - uint64_t uncompressed_size; /* size of the backed-up file before compression + int64_t uncompressed_size; /* size of the backed-up file before compression * and adding block headers. */ time_t mtime; /* file st_mtime attribute, can be used only @@ -593,7 +593,7 @@ struct timelineInfo { XLogSegNo end_segno; /* last present segment in this timeline */ size_t n_xlog_files; /* number of segments (only really existing) * does not include lost segments */ - size_t size; /* space on disk taken by regular WAL files */ + int64_t size; /* space on disk taken by regular WAL files */ parray *backups; /* array of pgBackup sturctures with info * about backups belonging to this timeline */ parray *xlog_filelist; /* array of ordinary WAL segments, '.partial' @@ -1055,7 +1055,7 @@ extern bool check_data_file(ConnectionArgs *arguments, pgFile *file, extern void catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, XLogRecPtr sync_lsn, BackupMode backup_mode, - uint32 checksum_version, size_t prev_size); + uint32 checksum_version, int64_t prev_size); extern void backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, CompressAlg calg, int clevel, uint32 checksum_version, diff --git a/src/show.c b/src/show.c index 5440e28a2..c7d714062 100644 --- a/src/show.c +++ b/src/show.c @@ -1089,7 +1089,7 @@ 
show_archive_json(const char *instance_name, uint32 xlog_seg_size, appendPQExpBuffer(buf, "%zu", tlinfo->n_xlog_files); json_add_key(buf, "size", json_level); - appendPQExpBuffer(buf, "%zu", tlinfo->size); + appendPQExpBuffer(buf, "%lld", (long long)tlinfo->size); json_add_key(buf, "zratio", json_level); diff --git a/src/util.c b/src/util.c index 5b341fc22..a54e34e08 100644 --- a/src/util.c +++ b/src/util.c @@ -403,9 +403,9 @@ copy_pgcontrol_file(fio_location from_location, const char *from_fullpath, digestControlFile(&ControlFile, buffer, size); file->crc = ControlFile.crc; - file->read_size = size; - file->write_size = size; - file->uncompressed_size = size; + file->read_size = (int64_t)size; + file->write_size = (int64_t)size; + file->uncompressed_size = (int64_t)size; writeControlFile(to_location, to_fullpath, &ControlFile); diff --git a/src/utils/file.c b/src/utils/file.c index eae68201f..2b37ad611 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -53,7 +53,7 @@ typedef struct { pio_file_kind_e kind; mode_t mode; - size_t size; + int64_t size; time_t mtime; bool is_datafile; Oid tblspcOid; @@ -1914,6 +1914,7 @@ fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, } req.arg.nblocks = file->size/BLCKSZ; + Assert((int64_t)req.arg.nblocks * BLCKSZ == file->size); req.arg.segmentno = file->segno * RELSEG_SIZE; req.arg.horizonLsn = horizonLsn; req.arg.checksumVersion = checksum_version; diff --git a/src/utils/file.h b/src/utils/file.h index 991bfe244..4dd95c871 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -105,7 +105,7 @@ typedef enum pio_file_kind { } pio_file_kind_e; typedef struct pio_stat { - uint64_t pst_size; + int64_t pst_size; int64_t pst_mtime; uint32_t pst_mode; pio_file_kind_e pst_kind; From c8baeae1431ea02be58cf11b22eadc1ceb6aec2c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 18 Nov 2022 02:24:06 +0300 Subject: [PATCH 088/339] fix "copied" in pioCopyWithFilters --- src/utils/file.c | 1 + 1 file 
changed, 1 insertion(+) diff --git a/src/utils/file.c b/src/utils/file.c index 62067a2bb..f51773e70 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5529,6 +5529,7 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, path($irepr(dest)), wantedSz(read_len), writtenSz(write_len))); } + *copied += write_len; } /* pioWriteFinish will check for async error if destination was remote */ From 8dc07e92473bfd5015f4a2a693a34f26077e51c1 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 22 Nov 2022 13:08:14 +0300 Subject: [PATCH 089/339] [PBCKP-314] create_backup_dir should not elog(ERROR) on existing directory. --- src/catalog.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 21a25f3d9..e6a460396 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1521,7 +1521,7 @@ pgBackupInitDir(pgBackup *backup, const char *backup_instance_path) .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { - elog(ERROR, "Can not create backup directory: %s", $errmsg(err)); + elog(ERROR, "Can not create backup subdirectory: %s", $errmsg(err)); } } @@ -1549,7 +1549,7 @@ create_backup_dir(pgBackup *backup, const char *backup_instance_path) if (!$haserr(err)) { backup->root_dir = pgut_strdup(path); - } else { + } else if (getErrno(err) != EEXIST) { elog(ERROR, "Can not create backup directory: %s", $errmsg(err)); } From e8c9b36560e5b59e851efd627c33d6802d7c9130 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 22 Nov 2022 13:23:39 +0300 Subject: [PATCH 090/339] ft_bytes_alloc/ft_bytes_free --- src/fu_util/fo_obj.h | 1 + src/fu_util/ft_util.h | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/src/fu_util/fo_obj.h b/src/fu_util/fo_obj.h index 6ad423dc6..be2fcbc19 100644 --- a/src/fu_util/fo_obj.h +++ b/src/fu_util/fo_obj.h @@ -527,6 +527,7 @@ fobj_error_kind(RT); fobj_error_kind(SysErr); fobj_error_object_key(cause); +fobj_error_cstr_key(causeStr); fobj_error_int_key(errNo); fobj_error_cstr_key(errNoStr); 
#define fobj_errno_keys(errno) (errNo, errno), (errNoStr, ft_strerror(errno)) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index 084eabf9b..db365f99b 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -314,6 +314,15 @@ ft_inline ft_bytes_t ft_bytes(void* ptr, size_t len) { return (ft_bytes_t){.ptr = (char*)ptr, .len = len}; } +ft_inline ft_bytes_t ft_bytes_alloc(size_t sz) { + return ft_bytes(ft_malloc(sz), sz); +} + +ft_inline void ft_bytes_free(ft_bytes_t* bytes) { + ft_free(bytes->ptr); + *bytes = ft_bytes(NULL, 0); +} + ft_inline void ft_bytes_consume(ft_bytes_t *bytes, size_t cut); ft_inline void ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src); From 104b732a9c5999d4963405e08b9067b8cb56784b Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 22 Nov 2022 13:05:39 +0300 Subject: [PATCH 091/339] change $isave() and $iresult to not issue warning --- src/fu_util/fo_obj.h | 2 +- src/fu_util/impl/fo_impl.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/fu_util/fo_obj.h b/src/fu_util/fo_obj.h index be2fcbc19..8799b6afc 100644 --- a/src/fu_util/fo_obj.h +++ b/src/fu_util/fo_obj.h @@ -598,7 +598,7 @@ fobj_klass(fobjErr); */ extern err_i fobj_err_combine(err_i first, err_i second); -#define fobj_reset_err(err) do { ft_dbg_assert(err); *err = (err_i){NULL}; } while(0) +#define fobj_reset_err(err) do { ft_dbg_assert(err != NULL); *err = (err_i){NULL}; } while(0) #include "./impl/fo_impl2.h" diff --git a/src/fu_util/impl/fo_impl.h b/src/fu_util/impl/fo_impl.h index 88a6d8aec..dc51b0f9b 100644 --- a/src/fu_util/impl/fo_impl.h +++ b/src/fu_util/impl/fo_impl.h @@ -543,8 +543,8 @@ extern void fobj__validate_args(fobj_method_handle_t meth, fobj_t self, #endif #define fobj__idel(iface) fobj_del((void*)&(iface)->self) -#define fobj__isave(iface) ((__typeof(iface)){.self = $save((iface).self)}) -#define fobj__iresult(iface) ((__typeof(iface)){.self = $result((iface).self)}) +#define fobj__isave(iface) ({__typeof(iface) 
t=(iface); $save(t.self); t;}) +#define fobj__iresult(iface) ({__typeof(iface) t=(iface); $result(t.self); t;}) #define fobj__ireturn(iface) return $iresult(iface) /* Autorelease pool handling */ From 3f64395c38bc88e838da0db7e24d0a2efef9218e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 17 Nov 2022 05:43:57 +0300 Subject: [PATCH 092/339] [PBCKP-349] pioReadFile replaces slurpFile --- src/pg_probackup.h | 3 +- src/util.c | 182 ++++++++++++++++++++++----------------------- src/utils/file.c | 161 +++++++++++++++++++++++++++++++++++++++ src/utils/file.h | 11 ++- 4 files changed, 259 insertions(+), 98 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 8ee22ca65..9ccecd620 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1141,11 +1141,10 @@ extern XLogRecPtr get_next_record_lsn(const char *archivedir, XLogSegNo segno, T /* in util.c */ extern TimeLineID get_current_timeline(PGconn *conn); -extern TimeLineID get_current_timeline_from_control(fio_location location, const char *pgdata_path, bool safe); +extern TimeLineID get_current_timeline_from_control(fio_location location, const char *pgdata_path); extern XLogRecPtr get_checkpoint_location(PGconn *conn); extern uint64 get_system_identifier(fio_location location, const char *pgdata_path, bool safe); extern uint64 get_remote_system_identifier(PGconn *conn); -extern uint32 get_data_checksum_version(bool safe); extern pg_crc32c get_pgcontrol_checksum(const char *pgdata_path); extern uint32 get_xlog_seg_size(const char *pgdata_path); extern void get_redo(fio_location location, const char *pgdata_path, RedoParams *redo); diff --git a/src/util.c b/src/util.c index a54e34e08..782c859aa 100644 --- a/src/util.c +++ b/src/util.c @@ -96,24 +96,6 @@ checkControlFile(ControlFileData *ControlFile) "the PostgreSQL installation would be incompatible with this data directory."); } -/* - * Verify control file contents in the buffer src, and copy it to *ControlFile. 
- */ -static void -digestControlFile(ControlFileData *ControlFile, char *src, size_t size) -{ - int ControlFileSize = PG_CONTROL_FILE_SIZE; - - if (size != ControlFileSize) - elog(ERROR, "unexpected control file size %d, expected %d", - (int) size, ControlFileSize); - - memcpy(ControlFile, src, sizeof(ControlFileData)); - - /* Additional checks on control file */ - checkControlFile(ControlFile); -} - /* * Write ControlFile to pg_control */ @@ -164,7 +146,7 @@ get_current_timeline(PGconn *conn) if (PQresultStatus(res) == PGRES_TUPLES_OK) val = PQgetvalue(res, 0, 0); else - return get_current_timeline_from_control(FIO_DB_HOST, instance_config.pgdata, false); + return get_current_timeline_from_control(FIO_DB_HOST, instance_config.pgdata); if (!parse_uint32(val, &tli, 0)) { @@ -172,28 +154,62 @@ get_current_timeline(PGconn *conn) elog(WARNING, "Invalid value of timeline_id %s", val); /* TODO 3.0 remove it and just error out */ - return get_current_timeline_from_control(FIO_DB_HOST, instance_config.pgdata, false); + return get_current_timeline_from_control(FIO_DB_HOST, instance_config.pgdata); } return tli; } +static err_i +get_control_file(fio_location location, path_t pgdata_path, path_t file, + ControlFileData *control, bool safe) +{ + pioDrive_i drive; + char fullpath[MAXPGPATH]; + ft_bytes_t bytes; + err_i err; + + fobj_reset_err(&err); + + join_path_components(fullpath, pgdata_path, file); + + drive = pioDriveForLocation(location); + bytes = $i(pioReadFile, drive, .path = fullpath, .err = &err); + if ($haserr(err) && safe) + { + ft_logerr(FT_WARNING, $errmsg(err), "Could not get control file"); + memset(control, 0, sizeof(ControlFileData)); + return $noerr(); + } + if ($haserr(err)) + return $err(RT, "Could not get control file: {cause}", + cause(err.self)); + + if (bytes.len != PG_CONTROL_FILE_SIZE) + return $err(RT, "unexpected control file size: {size}, expected {wantedSz}", + size(bytes.len), wantedSz(PG_CONTROL_FILE_SIZE)); + + memcpy(control, bytes.ptr, 
sizeof(ControlFileData)); + ft_bytes_free(&bytes); + + /* Additional checks on control file */ + checkControlFile(control); + + return $noerr(); +} + /* Get timeline from pg_control file */ TimeLineID -get_current_timeline_from_control(fio_location location, const char *pgdata_path, bool safe) +get_current_timeline_from_control(fio_location location, const char *pgdata_path) { + FOBJ_FUNC_ARP(); ControlFileData ControlFile; - char *buffer; - size_t size; + err_i err; - /* First fetch file... */ - buffer = slurpFile(location, pgdata_path, XLOG_CONTROL_FILE, - &size, safe); - if (safe && buffer == NULL) - return 0; - - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); + err = get_control_file(location, pgdata_path, XLOG_CONTROL_FILE, + &ControlFile, false); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Getting current timeline"); return ControlFile.checkPointCopy.ThisTimeLineID; } @@ -223,16 +239,14 @@ get_checkpoint_location(PGconn *conn) uint64 get_system_identifier(fio_location location, const char *pgdata_path, bool safe) { + FOBJ_FUNC_ARP(); ControlFileData ControlFile; - char *buffer; - size_t size; + err_i err; - /* First fetch file... */ - buffer = slurpFile(location, pgdata_path, XLOG_CONTROL_FILE, &size, safe); - if (safe && buffer == NULL) - return 0; - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); + err = get_control_file(location, pgdata_path, XLOG_CONTROL_FILE, + &ControlFile, safe); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Getting system identifier"); return ControlFile.system_identifier; } @@ -262,14 +276,14 @@ uint32 get_xlog_seg_size(const char *pgdata_path) { #if PG_VERSION_NUM >= 110000 + FOBJ_FUNC_ARP(); ControlFileData ControlFile; - char *buffer; - size_t size; + err_i err; - /* First fetch file... 
*/ - buffer = slurpFile(FIO_DB_HOST, pgdata_path, XLOG_CONTROL_FILE, &size, false); - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); + err = get_control_file(FIO_DB_HOST, pgdata_path, XLOG_CONTROL_FILE, + &ControlFile, false); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Trying to fetch segment size"); return ControlFile.xlog_seg_size; #else @@ -277,36 +291,17 @@ get_xlog_seg_size(const char *pgdata_path) #endif } -uint32 -get_data_checksum_version(bool safe) -{ - ControlFileData ControlFile; - char *buffer; - size_t size; - - /* First fetch file... */ - buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, - &size, safe); - if (buffer == NULL) - return 0; - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); - - return ControlFile.data_checksum_version; -} - pg_crc32c get_pgcontrol_checksum(const char *pgdata_path) { + FOBJ_FUNC_ARP(); ControlFileData ControlFile; - char *buffer; - size_t size; + err_i err; - /* First fetch file... */ - buffer = slurpFile(FIO_BACKUP_HOST, pgdata_path, XLOG_CONTROL_FILE, &size, false); - elog(WARNING, "checking %s", pgdata_path); - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); + err = get_control_file(FIO_BACKUP_HOST, pgdata_path, XLOG_CONTROL_FILE, + &ControlFile, false); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Getting pgcontrol checksum"); return ControlFile.crc; } @@ -314,15 +309,14 @@ get_pgcontrol_checksum(const char *pgdata_path) void get_redo(fio_location location, const char *pgdata_path, RedoParams *redo) { + FOBJ_FUNC_ARP(); ControlFileData ControlFile; - char *buffer; - size_t size; - - /* First fetch file... 
*/ - buffer = slurpFile(location, pgdata_path, XLOG_CONTROL_FILE, &size, false); + err_i err; - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); + err = get_control_file(location, pgdata_path, XLOG_CONTROL_FILE, + &ControlFile, false); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Fetching redo lsn"); redo->lsn = ControlFile.checkPointCopy.redo; redo->tli = ControlFile.checkPointCopy.ThisTimeLineID; @@ -352,14 +346,15 @@ void set_min_recovery_point(pgFile *file, const char *backup_path, XLogRecPtr stop_backup_lsn) { + FOBJ_FUNC_ARP(); ControlFileData ControlFile; - char *buffer; - size_t size; - char fullpath[MAXPGPATH]; + char fullpath[MAXPGPATH]; + err_i err; - /* First fetch file content */ - buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, &size, false); - digestControlFile(&ControlFile, buffer, size); + err = get_control_file(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, + &ControlFile, false); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Set min recovery point"); elog(LOG, "Current minRecPoint %X/%X", (uint32) (ControlFile.minRecoveryPoint >> 32), @@ -383,8 +378,6 @@ set_min_recovery_point(pgFile *file, const char *backup_path, /* Update pg_control checksum in backup_list */ file->crc = ControlFile.crc; - - pg_free(buffer); } /* @@ -394,22 +387,21 @@ void copy_pgcontrol_file(fio_location from_location, const char *from_fullpath, fio_location to_location, const char *to_fullpath, pgFile *file) { + FOBJ_FUNC_ARP(); ControlFileData ControlFile; - char *buffer; - size_t size; + err_i err; - buffer = slurpFile(from_location, from_fullpath, "", &size, false); - - digestControlFile(&ControlFile, buffer, size); + err = get_control_file(from_location, from_fullpath, "", + &ControlFile, false); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Fetching control file"); file->crc = ControlFile.crc; - file->read_size = (int64_t)size; - file->write_size = (int64_t)size; - 
file->uncompressed_size = (int64_t)size; + file->read_size = PG_CONTROL_FILE_SIZE; + file->write_size = PG_CONTROL_FILE_SIZE; + file->uncompressed_size = PG_CONTROL_FILE_SIZE; writeControlFile(to_location, to_fullpath, &ControlFile); - - pg_free(buffer); } /* diff --git a/src/utils/file.c b/src/utils/file.c index f51773e70..ef07ffb4c 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3605,6 +3605,7 @@ fio_communicate(int in, int out) fio_header hdr; pioDrive_i drive; pio_stat_t st; + ft_bytes_t bytes; int rc; int tmp_fd; pg_crc32 crc; @@ -3725,6 +3726,27 @@ fio_communicate(int in, int out) hdr.size = 0; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); break; + case FIO_READ_FILE_AT_ONCE: + bytes = $i(pioReadFile, drive, .path = buf, + .binary = hdr.arg != 0, .err = &err); + if ($haserr(err)) + { + const char *msg = $errmsg(err); + hdr.arg = getErrno(err); + hdr.size = strlen(msg) + 1; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(out, msg, hdr.size), hdr.size); + } + else + { + hdr.arg = 0; + hdr.size = bytes.len; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + if (bytes.len > 0) + IO_CHECK(fio_write_all(out, bytes.ptr, bytes.len), bytes.len); + } + ft_bytes_free(&bytes); + break; case FIO_ACCESS: /* Check presence of file with specified name */ hdr.size = 0; hdr.arg = access(buf, hdr.arg) < 0 ? 
errno : 0; @@ -4163,6 +4185,89 @@ pioLocalDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { parray_free(files); } +static ft_bytes_t +pioLocalDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) +{ + FOBJ_FUNC_ARP(); + Self(pioLocalDrive); + pioFile_i fl; + pio_stat_t st; + ft_bytes_t res = ft_bytes(NULL, 0); + size_t amount; + + fobj_reset_err(err); + + st = $(pioStat, self, .path = path, .follow_symlink = true, .err = err); + if ($haserr(*err)) + { + $iresult(*err); + return res; + } + if (st.pst_kind != PIO_KIND_REGULAR) + { + *err = $err(RT, "File {path:q} is not regular: {kind}", path(path), + kind(pio_file_kind2str(st.pst_kind, path))); + $iresult(*err); + return res; + } + + /* forbid too large file because of remote protocol */ + if (st.pst_size >= INT32_MAX) + { + *err = $err(RT, "File {path:q} is too large: {size}", path(path), + size(st.pst_size), errNo(ENOMEM)); + $iresult(*err); + return res; + } + if (binary) + res = ft_bytes_alloc(st.pst_size); + else + { + res = ft_bytes_alloc(st.pst_size + 1); + res.len -= 1; + } + + /* + * rely on "local file is read whole at once always". + * Is it true? + */ + fl = $(pioOpen, self, .path = path, .flags = O_RDONLY | (binary ? 
PG_BINARY : 0), + .err = err); + if ($haserr(*err)) + { + $iresult(*err); + return res; + } + + amount = pioReadFull($reduce(pioRead, fl), res, err); + if ($haserr(*err)) + { + ft_bytes_free(&res); + $iresult(*err); + return res; + } + + if (amount != st.pst_size) + { + ft_bytes_free(&res); + *err = $err(RT, "File {path:q} is truncated while reading", + path(path), errNo(EBUSY)); + $iresult(*err); + return res; + } + + if (binary) + res.len = amount; + else + { + res.len = amount + 1; + res.ptr[amount] = 0; + } + + $i(pioClose, fl); + return res; +} + /* LOCAL FILE */ static void pioLocalFile_fobjDispose(VSelf) @@ -4576,6 +4681,43 @@ pioRemoteDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { elog(ERROR, "couldn't remove remote dir"); } +static ft_bytes_t +pioRemoteDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) +{ + FOBJ_FUNC_ARP(); + Self(pioLocalDrive); + ft_bytes_t res; + + fobj_reset_err(err); + + fio_header hdr = { + .cop = FIO_READ_FILE_AT_ONCE, + .handle = -1, + .size = strlen(path)+1, + .arg = binary, + }; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); + + /* get the response */ + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + Assert(hdr.cop == FIO_READ_FILE_AT_ONCE); + + res = ft_bytes_alloc(hdr.size); + IO_CHECK(fio_read_all(fio_stdin, res.ptr, hdr.size), hdr.size); + + if (hdr.arg != 0) + { + *err = $syserr((int)hdr.arg, "Could not read remote file {path:q}: {causeStr}", + path(path), causeStr(res.ptr)); + $iresult(*err); + ft_bytes_free(&res); + } + + return res; +} + /* REMOTE FILE */ static err_i @@ -5540,6 +5682,25 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, return $noerr(); } +size_t +pioReadFull(pioRead_i src, ft_bytes_t bytes, err_i* err) +{ + ft_bytes_t b; + size_t r; + fobj_reset_err(err); + + b = bytes; + while (b.len) + { + r = $i(pioRead, src, b, err); + Assert(r <= b.len); + 
ft_bytes_consume(&b, r); + if ($haserr(*err)) + break; + } + return bytes.len - b.len; +} + fobj_klass_handle(pioFile); fobj_klass_handle(pioLocalDrive); fobj_klass_handle(pioRemoteDrive); diff --git a/src/utils/file.h b/src/utils/file.h index 5fb0bb3da..440be0210 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -69,6 +69,7 @@ typedef enum FIO_SEND_FILE_CONTENT, FIO_PAGE_ZERO, FIO_FILES_ARE_SAME, + FIO_READ_FILE_AT_ONCE, } fio_operations; typedef struct @@ -228,6 +229,8 @@ typedef const char* path_t; fobj_error_cstr_key(remotemsg); fobj_error_int_key(writtenSz); fobj_error_int_key(wantedSz); +fobj_error_int_key(size); +fobj_error_cstr_key(kind); #ifdef HAVE_LIBZ fobj_error_kind(GZ); @@ -276,6 +279,9 @@ fobj_iface(pioReadCloser); (bool, handle_tablespaces), (bool, symlink_and_hidden), \ (bool, backup_logs), (bool, skip_hidden), (int, external_dir_num) #define mth__pioRemoveDir void, (const char *, root), (bool, root_as_well) +#define mth__pioReadFile ft_bytes_t, (path_t, path), (bool, binary), \ + (err_i *, err) +#define mth__pioReadFile__optional() (binary, true) fobj_method(pioOpen); fobj_method(pioStat); @@ -288,11 +294,12 @@ fobj_method(pioMakeDir); fobj_method(pioFilesAreSame); fobj_method(pioListDir); fobj_method(pioRemoveDir); +fobj_method(pioReadFile); #define iface__pioDrive mth(pioOpen, pioStat, pioRemove, pioRename), \ mth(pioExists, pioGetCRC32, pioIsRemote), \ mth(pioMakeDir, pioListDir, pioRemoveDir), \ - mth(pioFilesAreSame) + mth(pioFilesAreSame), mth(pioReadFile) fobj_iface(pioDrive); extern pioDrive_i pioDriveForLocation(fio_location location); @@ -341,4 +348,6 @@ extern err_i pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, pioFilter_i _fltrs_[] = {__VA_ARGS__}; \ pioCopyWithFilters((dest), (src), _fltrs_, ft_arrsz(_fltrs_), NULL); \ }) + +extern size_t pioReadFull(pioRead_i src, ft_bytes_t bytes, err_i* err); #endif From 09d07a5f4838f0bf4691eacb6c380eb64d9594b1 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 22 Nov 2022 
19:51:20 +0300 Subject: [PATCH 093/339] revert S3 integration --- Makefile | 16 --------- src/pg_probackup.c | 81 +--------------------------------------------- src/pg_probackup.h | 18 ----------- src/utils/file.c | 14 -------- src/utils/file.h | 1 - 5 files changed, 1 insertion(+), 129 deletions(-) diff --git a/Makefile b/Makefile index 0bffd891a..3c8cb2c15 100644 --- a/Makefile +++ b/Makefile @@ -76,19 +76,6 @@ include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif -ifndef S3_DIR - ifneq ("$(wildcard $(abspath $(top_pbk_srcdir))/../s3)", "") - S3_DIR = $(abspath $(CURDIR))/../s3 - endif -endif - -ifdef S3_DIR - LDFLAGS += -lcurl - CFLAGS += $(shell pkg-config --cflags libxml-2.0) -DPBCKP_S3=1 - LDFLAGS += $(shell pkg-config --libs libxml-2.0) - OBJS += $(S3_DIR)/s3.o -endif - # PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -I$(top_pbk_srcdir)src -I$(BORROW_DIR) PG_CPPFLAGS += -I$(top_pbk_srcdir)src/fu_util -Wno-declaration-after-statement @@ -100,9 +87,6 @@ PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} # additional dependencies on borrowed files src/backup.o src/catchup.o src/pg_probackup.o: $(BORROW_DIR)/streamutil.h -ifdef S3_DIR - src/backup.o src/catchup.o src/pg_probackup.o: $(S3_DIR)/s3.o -endif src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o $(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h $(BORROW_DIR)/receivelog.h: $(BORROW_DIR)/walmethods.h diff --git a/src/pg_probackup.c b/src/pg_probackup.c index c1055ab4b..1df7bffe6 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -164,12 +164,6 @@ int64 ttl = -1; static char *expire_time_string = NULL; static pgSetBackupParams *set_backup_params = NULL; -#ifdef PBCKP_S3 -/* S3 options */ -S3_protocol s3_protocol; -char* s3_target_bucket = NULL; -#endif - /* ================ backupState =========== */ static char *backup_id_string = NULL; pgBackup current; @@ -180,9 +174,6 @@ static bool help_opt = false; static void 
opt_incr_restore_mode(ConfigOption *opt, const char *arg); static void opt_backup_mode(ConfigOption *opt, const char *arg); static void opt_show_format(ConfigOption *opt, const char *arg); -#ifdef PBCKP_S3 -static void opt_s3_protocol(ConfigOption *opt, const char *arg); -#endif static void compress_init(ProbackupSubcmd const subcmd); @@ -279,12 +270,6 @@ static ConfigOption cmd_options[] = { 'I', 170, "ttl", &ttl, SOURCE_CMD_STRICT, SOURCE_DEFAULT, 0, OPTION_UNIT_S, option_get_value}, { 's', 171, "expire-time", &expire_time_string, SOURCE_CMD_STRICT }, -#ifdef PBCKP_S3 - /* S3 options */ - { 'f', 245, "s3", opt_s3_protocol, SOURCE_CMD_STRICT }, - { 's', 246, "target-bucket", &s3_target_bucket, SOURCE_CMD_STRICT }, -#endif - /* options for backward compatibility * TODO: remove in 3.0.0 */ @@ -974,19 +959,6 @@ main(int argc, char *argv[]) compress_init(backup_subcmd); -#ifdef PBCKP_S3 - if (s3_protocol != S3_INVALID_PROTOCOL) - { - char* s3_config_file=""; - read_s3_config(s3_config_file); - } - else - { - if (s3_target_bucket != NULL) - elog(WARNING, "You cannot specify s3-target without using --s3 option with name of protocol"); - } -#endif - /* do actual operation */ switch (backup_subcmd) { @@ -1002,10 +974,6 @@ main(int argc, char *argv[]) { int err = 0; err = do_add_instance(instanceState, &instance_config); -#ifdef PBCKP_S3 - if (err == 0 && s3_protocol != S3_INVALID_PROTOCOL) - err = do_S3_write_config(instanceState); -#endif return err; } case DELETE_INSTANCE_CMD: @@ -1014,10 +982,6 @@ main(int argc, char *argv[]) { int err = 0; err = do_init(catalogState); -#ifdef PBCKP_S3 - if (err == 0 && s3_protocol != S3_INVALID_PROTOCOL) - err = S3_pre_start_check(config); -#endif return err; } case BACKUP_CMD: @@ -1034,10 +998,6 @@ main(int argc, char *argv[]) elog(ERROR, "required parameter not specified: BACKUP_MODE " "(-b, --backup-mode)"); -#ifdef PBCKP_S3 - if (s3_protocol != S3_INVALID_PROTOCOL) - return do_S3_backup(instanceState, set_backup_params, 
start_time); -#endif return do_backup(instanceState, set_backup_params, no_validate, no_sync, backup_logs, start_time); } @@ -1045,10 +1005,6 @@ main(int argc, char *argv[]) return do_catchup(catchup_source_pgdata, catchup_destination_pgdata, num_threads, !no_sync, exclude_absolute_paths_list, exclude_relative_paths_list); case RESTORE_CMD: -#ifdef PBCKP_S3 - if (s3_protocol != S3_INVALID_PROTOCOL) - return do_S3_restore(instanceState, current.backup_id); -#endif return do_restore_or_validate(instanceState, current.backup_id, recovery_target_options, restore_params, no_sync); @@ -1068,10 +1024,6 @@ main(int argc, char *argv[]) restore_params, no_sync); case SHOW_CMD: -#ifdef PBCKP_S3 - if (s3_protocol != S3_INVALID_PROTOCOL) - return do_S3_show(instanceState); -#endif return do_show(catalogState, instanceState, current.backup_id, show_archive); case DELETE_CMD: @@ -1259,35 +1211,4 @@ opt_exclude_path(ConfigOption *opt, const char *arg) opt_parser_add_to_parray_helper(&exclude_absolute_paths_list, arg); else opt_parser_add_to_parray_helper(&exclude_relative_paths_list, arg); -} - -#ifdef PBCKP_S3 -static S3_protocol -parse_s3_protocol(const char *value) -{ - const char *v = value; - size_t len; - - /* Skip all spaces detected */ - while (IsSpace(*v)) - v++; - len = strlen(v); - - if (len > 0 && pg_strncasecmp("MINIO", v, len) == 0) - return S3_MINIO_PROTOCOL; - if (len > 0 && pg_strncasecmp("AWS", v, len) == 0) - return S3_AWS_PROTOCOL; - else if (len > 0 && pg_strncasecmp("GOOGLE", v, len) == 0) - return S3_GOOGLE_PROTOCOL; - else if (len > 0 && pg_strncasecmp("VK", v, len) == 0) - return S3_VK_PROTOCOL; - else - return S3_INVALID_PROTOCOL; -} - -static void -opt_s3_protocol(ConfigOption *opt, const char *arg) -{ - s3_protocol = parse_s3_protocol(arg); -} -#endif +} \ No newline at end of file diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 8ee22ca65..490df4ea6 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -43,10 +43,6 @@ #include 
"pg_probackup_state.h" -#ifdef PBCKP_S3 -#include "../s3/s3.h" -#endif - #if defined(WIN32) && !(defined(_UCRT) && defined(__MINGW64__)) #error Windows port requires compilation in MinGW64 UCRT environment #endif @@ -810,11 +806,6 @@ extern pgBackup current; /* argv of the process */ extern char** commands_args; -#ifdef PBCKP_S3 -/* S3 options */ -extern S3_protocol s3_protocol; -#endif - /* in backup.c */ extern int do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, bool no_validate, bool no_sync, bool backup_logs, time_t start_time); @@ -825,15 +816,6 @@ extern const char *deparse_backup_mode(BackupMode mode); extern void process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno); -#ifdef PBCKP_S3 -/* in s3.c */ -extern int do_S3_backup(InstanceState *instanceState, - pgSetBackupParams *set_backup_params,time_t start_time); -extern int do_S3_show(InstanceState *instanceState); -extern int do_S3_restore(InstanceState *instanceState, time_t target_backup_id); -extern int do_S3_write_config(InstanceState *instanceState); -#endif - /* in catchup.c */ extern int do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files, parray *exclude_absolute_paths_list, parray *exclude_relative_paths_list); diff --git a/src/utils/file.c b/src/utils/file.c index f51773e70..2b9963e5e 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -7,10 +7,6 @@ #include "file.h" #include "storage/checksum.h" -#ifdef PBCKP_S3 -#include "../s3/s3.h" -#endif - #define PRINTF_BUF_SIZE 1024 static __thread unsigned long fio_fdset = 0; @@ -3946,13 +3942,6 @@ static pioDrive_i remoteDrive; pioDrive_i pioDriveForLocation(fio_location loc) { - if (loc == FIO_CLOUD_HOST) -#ifdef PBCKP_S3 - return cloudDrive; -#else - elog(ERROR, "NO CLOUD DRIVE YET"); -#endif - if (fio_is_remote(loc)) return remoteDrive; else @@ -5560,7 +5549,4 @@ init_pio_objects(void) localDrive = bindref_pioDrive($alloc(pioLocalDrive)); 
remoteDrive = bindref_pioDrive($alloc(pioRemoteDrive)); -#ifdef PBCKP_S3 - create_pioCloudeDrive(); -#endif } diff --git a/src/utils/file.h b/src/utils/file.h index 5fb0bb3da..426a9bf18 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -91,7 +91,6 @@ typedef enum FIO_DB_HOST, /* data is located at Postgres server host */ FIO_BACKUP_HOST, /* data is located at backup host */ FIO_REMOTE_HOST, /* date is located at remote host */ - FIO_CLOUD_HOST /* date is located at cloud (S3) */ } fio_location; typedef enum pio_file_kind { From 8a3acdf265f63d28df5eec9c50f028e880ab63d9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 22 Nov 2022 19:53:02 +0300 Subject: [PATCH 094/339] rework filesize division check. --- src/data.c | 11 ++++------- src/fu_util/ft_util.h | 3 +++ src/fu_util/impl/ft_impl.h | 9 +++++++++ src/utils/file.c | 3 +-- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/data.c b/src/data.c index bc1f18097..41d528836 100644 --- a/src/data.c +++ b/src/data.c @@ -507,8 +507,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat * NOTE This is a normal situation, if the file size has changed * since the moment we computed it. */ - file->n_blocks = (typeof(file->n_blocks))(file->size/BLCKSZ); - Assert((int64_t)file->n_blocks * BLCKSZ == file->size); + file->n_blocks = ft_div_i64u32_to_i32(file->size, BLCKSZ); /* * Skip unchanged file only if it exists in previous backup. 
@@ -618,8 +617,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat if (backup_mode == BACKUP_MODE_FULL || backup_mode == BACKUP_MODE_DIFF_DELTA) { - file->n_blocks = (typeof(file->n_blocks))(file->read_size / BLCKSZ); - Assert((int64_t)file->n_blocks * BLCKSZ == file->read_size); + file->n_blocks = ft_div_i64u32_to_i32(file->read_size, BLCKSZ); } /* Determine that file didn`t changed in case of incremental backup */ @@ -666,7 +664,7 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa * NOTE This is a normal situation, if the file size has changed * since the moment we computed it. */ - file->n_blocks = file->size/BLCKSZ; + file->n_blocks = ft_div_i64u32_to_i32(file->size, BLCKSZ); /* * Skip unchanged file only if it exists in destination directory. @@ -1599,8 +1597,7 @@ check_data_file(ConnectionArgs *arguments, pgFile *file, * NOTE This is a normal situation, if the file size has changed * since the moment we computed it. 
*/ - nblocks = (typeof(nblocks))(file->size/BLCKSZ); - Assert((int64_t)nblocks * BLCKSZ == file->size); + nblocks = ft_div_i64u32_to_i32(file->size, BLCKSZ); for (blknum = 0; blknum < nblocks; blknum++) { diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index db365f99b..401495ff0 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -174,6 +174,9 @@ extern void ft_set_allocators(void *(*_realloc)(void *, size_t), ft_inline size_t ft_add_size(size_t a, size_t b); ft_inline size_t ft_mul_size(size_t a, size_t b); +/* division 64->32 bit */ +ft_inline int32_t ft_div_i64u32_to_i32(int64_t a, uint32_t b); + #define ft_new(type) ft_calloc(sizeof(type)) #define ft_newar(type, cnt) ft_calloc(ft_mul_size(sizeof(type), (cnt))) diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index 752da826f..0a252398e 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -175,6 +175,15 @@ ft_inline size_t ft_mul_size(size_t a, size_t b) { return r; } +/* division 64->32 bit */ +ft_inline int32_t ft_div_i64u32_to_i32(int64_t a, uint32_t b) { + int64_t r; + ft_assert(a >= 0); + r = a / b; + ft_assert(r <= INT32_MAX); + return (int32_t)r; +} + extern ft_gcc_malloc(ft_realloc, 1) void* ft_realloc(void* ptr, size_t new_sz); extern ft_gcc_malloc(ft_realloc, 1) void* ft_calloc(size_t sz); diff --git a/src/utils/file.c b/src/utils/file.c index 2b9963e5e..5724f3ea0 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1913,8 +1913,7 @@ fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, req.arg.bitmapsize = 0; } - req.arg.nblocks = file->size/BLCKSZ; - Assert((int64_t)req.arg.nblocks * BLCKSZ == file->size); + req.arg.nblocks = ft_div_i64u32_to_i32(file->size, BLCKSZ); req.arg.segmentno = file->segno * RELSEG_SIZE; req.arg.horizonLsn = horizonLsn; req.arg.checksumVersion = checksum_version; From 622acad4bd1bca0ac442445dcda20a07f10a6811 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 24 Nov 2022 
12:02:57 +0300 Subject: [PATCH 095/339] fix memory pioCopyWithFilters with new fobj_alloc_temp --- src/fu_util/fo_obj.h | 11 +++++++++++ src/fu_util/impl/fo_impl.c | 1 + src/fu_util/impl/fo_impl2.h | 27 +++++++++++++++++++++++++++ src/utils/file.c | 2 +- 4 files changed, 40 insertions(+), 1 deletion(-) diff --git a/src/fu_util/fo_obj.h b/src/fu_util/fo_obj.h index 8799b6afc..bef1e6fcf 100644 --- a/src/fu_util/fo_obj.h +++ b/src/fu_util/fo_obj.h @@ -479,6 +479,17 @@ extern fobjBool* fobj_bool(bool f); #define kls__fobjBool mth(fobjRepr, fobjFormat) fobj_klass(fobjBool); +/* + * Allocate temporary blob. + * It could be anything, and it will be automatically released. + */ +static inline void* fobj_alloc_temp(size_t buf_size); +/* get object pointer for temporary blob */ +static inline fobj_t fobj_temp2obj(void* temp); +#define fobj_temp_save(ptr) $save(fobj_temp2obj(ptr)) +#define fobj_temp_result(ptr) $result(fobj_temp2obj(ptr)) +#define fobj_temp_return(ptr) $return(fobj_temp2obj(ptr)) + /********************************** * kv */ diff --git a/src/fu_util/impl/fo_impl.c b/src/fu_util/impl/fo_impl.c index 63fa372fb..d1532b517 100644 --- a/src/fu_util/impl/fo_impl.c +++ b/src/fu_util/impl/fo_impl.c @@ -1341,6 +1341,7 @@ fobj_klass_handle(fobjInt); fobj_klass_handle(fobjUInt); fobj_klass_handle(fobjFloat); fobj_klass_handle(fobjBool); +fobj_klass_handle(fobjTempBuffer); void fobj_init(void) { diff --git a/src/fu_util/impl/fo_impl2.h b/src/fu_util/impl/fo_impl2.h index 1cac933a0..a9495e3ca 100644 --- a/src/fu_util/impl/fo_impl2.h +++ b/src/fu_util/impl/fo_impl2.h @@ -234,4 +234,31 @@ fobj_errsrc(err_i err) { #define fobj__transform_fokv(...) 
\ fm_eval_tuples_comma(fobj__transform_fokv_do, __VA_ARGS__) +/********************************** + * Temp Buffer - "anything" you want to be automatically cleared + */ +typedef struct fobjTempBuffer { + char buf[1]; +} fobjTempBuffer; +#define kls__fobjTempBuffer varsized(buf) +fobj_klass(fobjTempBuffer); + + +static inline void* +fobj_alloc_temp(size_t buf_size) +{ + return fobj_alloc_sized(fobjTempBuffer, buf_size)->buf; +} + +static inline fobj_t +fobj_temp2obj(void* temp) +{ + /* + * It looks dumb for the moment. + * But in future object header will not be hidden, therefore + * it will be meaningful. + */ + return (fobj_t)((char*)temp - offsetof(fobjTempBuffer, buf)); +} + #endif // FOBJ_OBJ_PRIV2_H diff --git a/src/utils/file.c b/src/utils/file.c index 4d8baa69b..562e4e565 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5634,7 +5634,7 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, for (i = nfilters - 1; i >= 0; i--) dest = pioWrapWriteFilter(dest, filters[i], OUT_BUF_SIZE); - buf = ft_malloc(OUT_BUF_SIZE); + buf = fobj_alloc_temp(OUT_BUF_SIZE); for (;;) { From 8ef02743f56f8f364767a48dd43131833c642d5b Mon Sep 17 00:00:00 2001 From: "s.fukanchik" Date: Fri, 25 Nov 2022 12:38:59 +0300 Subject: [PATCH 096/339] PBCKP-361 add 'noreturn' logging function --- src/utils/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 562e4e565..91ed7e986 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -262,7 +262,7 @@ fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size) pio_file_kind_e pio_statmode2file_kind(mode_t mode, const char* path) { - pio_file_kind_e kind; + pio_file_kind_e kind = PIO_KIND_UNKNOWN; if (S_ISREG(mode)) kind = PIO_KIND_REGULAR; else if (S_ISDIR(mode)) @@ -296,7 +296,7 @@ pio_statmode2file_kind(mode_t mode, const char* path) pio_file_kind_e pio_str2file_kind(const char* str, const char* path) { - pio_file_kind_e kind; + pio_file_kind_e 
kind = PIO_KIND_UNKNOWN; if (strncmp(str, "reg", 3) == 0) kind = PIO_KIND_REGULAR; else if (strncmp(str, "dir", 3) == 0) From de571722d6168cac36ac44d9ebf1c5b97f18250b Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 25 Nov 2022 12:54:56 +0300 Subject: [PATCH 097/339] temporary comment out ft_memzero to not warn Valgrind --- src/fu_util/impl/ft_impl.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/fu_util/impl/ft_impl.c b/src/fu_util/impl/ft_impl.c index 1897e6bec..d460c314e 100644 --- a/src/fu_util/impl/ft_impl.c +++ b/src/fu_util/impl/ft_impl.c @@ -75,6 +75,7 @@ ft_realloc_arr(void* ptr, size_t elem_sz, size_t old_elems, size_t new_elems) { static const uint8_t zero[4096] = {0}; void ft_memzero(void *_ptr, size_t sz) { +#ifdef OPTIMIZE_FT_MEMZERO uint8_t* ptr = _ptr; uintptr_t ptri = (uintptr_t)ptr; uintptr_t diff; @@ -98,6 +99,7 @@ ft_memzero(void *_ptr, size_t sz) { } if (sz) +#endif memset(ptr, 0, sz); } From 681ca5e5ecf196fe133587579bf21b8aed2274ad Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 25 Nov 2022 14:01:09 +0300 Subject: [PATCH 098/339] fix :-( --- src/fu_util/impl/ft_impl.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/fu_util/impl/ft_impl.c b/src/fu_util/impl/ft_impl.c index d460c314e..15f56467a 100644 --- a/src/fu_util/impl/ft_impl.c +++ b/src/fu_util/impl/ft_impl.c @@ -71,11 +71,11 @@ ft_realloc_arr(void* ptr, size_t elem_sz, size_t old_elems, size_t new_elems) { return ptr; } +#ifdef OPTIMIZE_FT_MEMZERO #define MEMZERO_BLOCK 4096 static const uint8_t zero[4096] = {0}; void ft_memzero(void *_ptr, size_t sz) { -#ifdef OPTIMIZE_FT_MEMZERO uint8_t* ptr = _ptr; uintptr_t ptri = (uintptr_t)ptr; uintptr_t diff; @@ -99,9 +99,14 @@ ft_memzero(void *_ptr, size_t sz) { } if (sz) -#endif memset(ptr, 0, sz); } +#else +void +ft_memzero(void *ptr, size_t sz) { + memset(ptr, 0, sz); +} +#endif /* String utils */ From b9a2a82a57a09d6393ac3d2f59295096f5a334bb Mon Sep 17 00:00:00 2001 From: Elena Indrupskaya 
Date: Mon, 28 Nov 2022 15:52:13 +0300 Subject: [PATCH 099/339] [DOC] [PGPRO-7104] Remove outdated options and PostgreSQL versions from documentation [skip travis] --- doc/pgprobackup.xml | 125 ++------------------------------------------ 1 file changed, 4 insertions(+), 121 deletions(-) diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 107a6a4f8..945c120f3 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -164,7 +164,7 @@ doc/src/sgml/pgprobackup.sgml recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. - pg_probackup supports PostgreSQL 9.6 or higher. + pg_probackup supports PostgreSQL 10 or higher. @@ -408,7 +408,7 @@ doc/src/sgml/pgprobackup.sgml - On Unix systems, for PostgreSQL 10 or lower, + On Unix systems, for PostgreSQL 10, a backup can be made only by the same OS user that has started the PostgreSQL server. For example, if PostgreSQL server is started by user postgres, the backup command must also be run @@ -596,30 +596,6 @@ pg_probackup add-instance -B backup_dir -D used for connection to the PostgreSQL server: - - For PostgreSQL 9.6: - - -BEGIN; -CREATE ROLE backup WITH LOGIN; -GRANT USAGE ON SCHEMA pg_catalog TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; -GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_current_snapshot() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO backup; -COMMIT; - - - For PostgreSQL 10: - BEGIN; CREATE ROLE backup WITH LOGIN; @@ -866,7 +842,7 @@ archive_command = '"install_dir/pg_probackup" archive Setting up Backup from Standby - For PostgreSQL 9.6 or higher, pg_probackup can take backups from + pg_probackup can take backups from a standby server. This requires the following additional setup: @@ -4837,8 +4813,7 @@ pg_probackup catchup -b catchup_mode Specifies the LSN of the write-ahead log location up to which - recovery will proceed. Can be used only when restoring - a database cluster of major version 10 or higher. + recovery will proceed. @@ -5800,96 +5775,6 @@ pg_probackup catchup -b catchup_mode - - Replica Options - - This section describes the options related to taking a backup - from standby. - - - - Starting from pg_probackup 2.0.24, backups can be - taken from standby without connecting to the master server, - so these options are no longer required. In lower versions, - pg_probackup had to connect to the master to determine - recovery time — the earliest moment for which you can - restore a consistent state of the database cluster. - - - - - - - - - Deprecated. Specifies the name of the database on the master - server to connect to. The connection is used only for managing - the backup process, so you can connect to any existing - database. Can be set in the pg_probackup.conf using the - command. - - - Default: postgres, the default PostgreSQL database - - - - - - - - - Deprecated. Specifies the host name of the system on which the - master server is running. - - - - - - - - - Deprecated. Specifies the TCP port or the local Unix domain - socket file extension on which the master server is listening - for connections. - - - Default: 5432, the PostgreSQL default port - - - - - - - - - Deprecated. 
User name to connect as. - - - Default: postgres, - the PostgreSQL default user name - - - - - - - - - - Deprecated. Wait time for WAL segment streaming via - replication, in seconds. By default, pg_probackup waits 300 - seconds. You can also define this parameter in the - pg_probackup.conf configuration file using the - command. - - - Default: 300 sec - - - - - - @@ -6101,8 +5986,6 @@ xlog-seg-size = 16777216 pgdatabase = backupdb pghost = postgres_host pguser = backup -# Replica parameters -replica-timeout = 5min # Archive parameters archive-timeout = 5min # Logging parameters From 33a573d821234a3d63254768e7ebf21478fdac69 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Fri, 25 Nov 2022 12:43:40 +0500 Subject: [PATCH 100/339] [PBCKP-356]: Rewriting write_backup and pgBackupWriteControl with pio. Change type of pgBackupWriteControl function to ft_str_t --- src/catalog.c | 122 +++++++++++++++++++++++++-------------------- src/pg_probackup.h | 2 +- src/show.c | 8 ++- 3 files changed, 77 insertions(+), 55 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index ee0276176..d81aced44 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2365,104 +2365,106 @@ add_note(pgBackup *target_backup, char *note) } /* - * Write information about backup.in to stream "out". + * Write information about backup.in to ft_strbuf_t". */ -void -pgBackupWriteControl(FILE *out, pgBackup *backup, bool utc) +ft_str_t +pgBackupWriteControl(pgBackup *backup, bool utc) { char timestamp[100]; + ft_strbuf_t buf = ft_strbuf_zero(); - fio_fprintf(out, "#Configuration\n"); - fio_fprintf(out, "backup-mode = %s\n", pgBackupGetBackupMode(backup, false)); - fio_fprintf(out, "stream = %s\n", backup->stream ? "true" : "false"); - fio_fprintf(out, "compress-alg = %s\n", + ft_strbuf_catf(&buf, "#Configuration\n"); + ft_strbuf_catf(&buf, "backup-mode = %s\n", pgBackupGetBackupMode(backup, false)); + ft_strbuf_catf(&buf, "stream = %s\n", backup->stream ? 
"true" : "false"); + ft_strbuf_catf(&buf, "compress-alg = %s\n", deparse_compress_alg(backup->compress_alg)); - fio_fprintf(out, "compress-level = %d\n", backup->compress_level); - fio_fprintf(out, "from-replica = %s\n", backup->from_replica ? "true" : "false"); + ft_strbuf_catf(&buf, "compress-level = %d\n", backup->compress_level); + ft_strbuf_catf(&buf, "from-replica = %s\n", backup->from_replica ? "true" : "false"); - fio_fprintf(out, "\n#Compatibility\n"); - fio_fprintf(out, "block-size = %u\n", backup->block_size); - fio_fprintf(out, "xlog-block-size = %u\n", backup->wal_block_size); - fio_fprintf(out, "checksum-version = %u\n", backup->checksum_version); + ft_strbuf_catf(&buf, "\n#Compatibility\n"); + ft_strbuf_catf(&buf, "block-size = %u\n", backup->block_size); + ft_strbuf_catf(&buf, "xlog-block-size = %u\n", backup->wal_block_size); + ft_strbuf_catf(&buf, "checksum-version = %u\n", backup->checksum_version); if (backup->program_version[0] != '\0') - fio_fprintf(out, "program-version = %s\n", backup->program_version); + ft_strbuf_catf(&buf, "program-version = %s\n", backup->program_version); if (backup->server_version[0] != '\0') - fio_fprintf(out, "server-version = %s\n", backup->server_version); + ft_strbuf_catf(&buf, "server-version = %s\n", backup->server_version); - fio_fprintf(out, "\n#Result backup info\n"); - fio_fprintf(out, "timelineid = %d\n", backup->tli); + ft_strbuf_catf(&buf, "\n#Result backup info\n"); + ft_strbuf_catf(&buf, "timelineid = %d\n", backup->tli); /* LSN returned by pg_start_backup */ - fio_fprintf(out, "start-lsn = %X/%X\n", + ft_strbuf_catf(&buf, "start-lsn = %X/%X\n", (uint32) (backup->start_lsn >> 32), (uint32) backup->start_lsn); /* LSN returned by pg_stop_backup */ - fio_fprintf(out, "stop-lsn = %X/%X\n", + ft_strbuf_catf(&buf, "stop-lsn = %X/%X\n", (uint32) (backup->stop_lsn >> 32), (uint32) backup->stop_lsn); time2iso(timestamp, lengthof(timestamp), backup->start_time, utc); - fio_fprintf(out, "start-time = '%s'\n", 
timestamp); + ft_strbuf_catf(&buf, "start-time = '%s'\n", timestamp); if (backup->merge_time > 0) { time2iso(timestamp, lengthof(timestamp), backup->merge_time, utc); - fio_fprintf(out, "merge-time = '%s'\n", timestamp); + ft_strbuf_catf(&buf, "merge-time = '%s'\n", timestamp); } if (backup->end_time > 0) { time2iso(timestamp, lengthof(timestamp), backup->end_time, utc); - fio_fprintf(out, "end-time = '%s'\n", timestamp); + ft_strbuf_catf(&buf, "end-time = '%s'\n", timestamp); } - fio_fprintf(out, "recovery-xid = " XID_FMT "\n", backup->recovery_xid); + ft_strbuf_catf(&buf, "recovery-xid = " XID_FMT "\n", backup->recovery_xid); if (backup->recovery_time > 0) { time2iso(timestamp, lengthof(timestamp), backup->recovery_time, utc); - fio_fprintf(out, "recovery-time = '%s'\n", timestamp); + ft_strbuf_catf(&buf, "recovery-time = '%s'\n", timestamp); } if (backup->expire_time > 0) { time2iso(timestamp, lengthof(timestamp), backup->expire_time, utc); - fio_fprintf(out, "expire-time = '%s'\n", timestamp); + ft_strbuf_catf(&buf, "expire-time = '%s'\n", timestamp); } if (backup->merge_dest_backup != 0) - fio_fprintf(out, "merge-dest-id = '%s'\n", base36enc(backup->merge_dest_backup)); + ft_strbuf_catf(&buf, "merge-dest-id = '%s'\n", base36enc(backup->merge_dest_backup)); /* * Size of PGDATA directory. The size does not include size of related * WAL segments in archive 'wal' directory. 
*/ if (backup->data_bytes != BYTES_INVALID) - fio_fprintf(out, "data-bytes = " INT64_FORMAT "\n", backup->data_bytes); + ft_strbuf_catf(&buf, "data-bytes = " INT64_FORMAT "\n", backup->data_bytes); if (backup->wal_bytes != BYTES_INVALID) - fio_fprintf(out, "wal-bytes = " INT64_FORMAT "\n", backup->wal_bytes); + ft_strbuf_catf(&buf, "wal-bytes = " INT64_FORMAT "\n", backup->wal_bytes); if (backup->uncompressed_bytes >= 0) - fio_fprintf(out, "uncompressed-bytes = " INT64_FORMAT "\n", backup->uncompressed_bytes); + ft_strbuf_catf(&buf, "uncompressed-bytes = " INT64_FORMAT "\n", backup->uncompressed_bytes); if (backup->pgdata_bytes >= 0) - fio_fprintf(out, "pgdata-bytes = " INT64_FORMAT "\n", backup->pgdata_bytes); + ft_strbuf_catf(&buf, "pgdata-bytes = " INT64_FORMAT "\n", backup->pgdata_bytes); - fio_fprintf(out, "status = %s\n", status2str(backup->status)); + ft_strbuf_catf(&buf, "status = %s\n", status2str(backup->status)); /* 'parent_backup' is set if it is incremental backup */ if (backup->parent_backup != 0) - fio_fprintf(out, "parent-backup-id = '%s'\n", base36enc(backup->parent_backup)); + ft_strbuf_catf(&buf, "parent-backup-id = '%s'\n", base36enc(backup->parent_backup)); /* print connection info except password */ if (backup->primary_conninfo) - fio_fprintf(out, "primary_conninfo = '%s'\n", backup->primary_conninfo); + ft_strbuf_catf(&buf, "primary_conninfo = '%s'\n", backup->primary_conninfo); /* print external directories list */ if (backup->external_dir_str) - fio_fprintf(out, "external-dirs = '%s'\n", backup->external_dir_str); + ft_strbuf_catf(&buf, "external-dirs = '%s'\n", backup->external_dir_str); if (backup->note) - fio_fprintf(out, "note = '%s'\n", backup->note); + ft_strbuf_catf(&buf, "note = '%s'\n", backup->note); if (backup->content_crc != 0) - fio_fprintf(out, "content-crc = %u\n", backup->content_crc); + ft_strbuf_catf(&buf, "content-crc = %u\n", backup->content_crc); + return ft_strbuf_steal(&buf); } /* @@ -2474,29 +2476,38 @@ 
pgBackupWriteControl(FILE *out, pgBackup *backup, bool utc) void write_backup(pgBackup *backup, bool strict) { - FILE *fp = NULL; - char path[MAXPGPATH]; + FOBJ_FUNC_ARP(); + + pioFile_i out; char path_temp[MAXPGPATH]; - char buf[8192]; + char path[MAXPGPATH]; + err_i err = $noerr(); + + pioDrive_i backup_drive = pioDriveForLocation(FIO_BACKUP_HOST); join_path_components(path, backup->root_dir, BACKUP_CONTROL_FILE); snprintf(path_temp, sizeof(path_temp), "%s.tmp", path); - fp = fopen(path_temp, PG_BINARY_W); - if (fp == NULL) - elog(ERROR, "Cannot open control file \"%s\": %s", - path_temp, strerror(errno)); + out = $i(pioOpen, backup_drive, path_temp, + .flags = O_WRONLY | O_CREAT | O_EXCL | PG_BINARY, + .err = &err); - if (chmod(path_temp, FILE_PERMISSION) == -1) - elog(ERROR, "Cannot change mode of \"%s\": %s", path_temp, - strerror(errno)); + if ($noerr(err)) + { + size_t length; + ft_str_t buf = pgBackupWriteControl(backup, true); + length = $i(pioWrite, out, ft_bytes(buf.ptr, buf.len), &err); - setvbuf(fp, buf, _IOFBF, sizeof(buf)); + ft_free((char*)buf.ptr); - pgBackupWriteControl(fp, backup, true); + if (length != buf.len) + elog(ERROR, "Incorrect size of writing data"); + } + else + elog(ERROR, "Failed to open file \"%s\" ", path_temp); - /* Ignore 'out of space' error in lax mode */ - if (fflush(fp) != 0) + err = $i(pioWriteFinish, out); + if ($haserr(err)) { int elevel = ERROR; int save_errno = errno; @@ -2509,16 +2520,21 @@ write_backup(pgBackup *backup, bool strict) if (!strict && (save_errno == ENOSPC)) { - fclose(fp); - if (fio_remove(FIO_BACKUP_HOST, path_temp, false) != 0) + err = $i(pioClose, out); + $i(pioRemove, backup_drive, path_temp, false); + if ($haserr(err)) elog(elevel, "Additionally cannot remove file \"%s\": %s", path_temp, strerror(errno)); return; } } - if (fclose(fp) != 0) + /* Ignore 'out of space' error in lax mode */ + err = $i(pioClose, out); + if ($haserr(err)) + { elog(ERROR, "Cannot close control file \"%s\": %s", path_temp, 
strerror(errno)); + } if (fio_sync(FIO_BACKUP_HOST, path_temp) < 0) elog(ERROR, "Cannot sync control file \"%s\": %s", diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 04443cf10..73efb5d8a 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -958,7 +958,7 @@ extern void do_set_backup(InstanceState *instanceState, time_t backup_id, extern void pin_backup(pgBackup *target_backup, pgSetBackupParams *set_backup_params); extern void add_note(pgBackup *target_backup, char *note); -extern void pgBackupWriteControl(FILE *out, pgBackup *backup, bool utc); +extern ft_str_t pgBackupWriteControl(pgBackup *backup, bool utc); extern void write_backup_filelist(pgBackup *backup, parray *files, const char *root, parray *external_list, bool sync); diff --git a/src/show.c b/src/show.c index c97b39ccf..a2bc6b696 100644 --- a/src/show.c +++ b/src/show.c @@ -518,7 +518,13 @@ show_backup(InstanceState *instanceState, time_t requested_backup_id) } if (show_format == SHOW_PLAIN) - pgBackupWriteControl(stdout, backup, false); + { + ft_str_t buf = pgBackupWriteControl(backup, false); + + fwrite(buf.ptr, 1, buf.len, stdout); + + ft_free((char*)buf.ptr); + } else elog(ERROR, "Invalid show format %d", (int) show_format); From dee76b45ecceba73838ae0e7bb9f50d35e22b717 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 28 Nov 2022 05:54:21 +0300 Subject: [PATCH 101/339] [PBCKP-368] add pioWriteFile for writting small files --- src/fu_util/ft_util.h | 1 + src/fu_util/impl/ft_impl.h | 30 +++++---- src/utils/file.c | 135 +++++++++++++++++++++++++++++++++++-- src/utils/file.h | 8 ++- 4 files changed, 157 insertions(+), 17 deletions(-) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index 401495ff0..d4bdf0978 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -418,6 +418,7 @@ ft_inline bool ft_strbuf_ensure(ft_strbuf_t *buf, size_t n); /* All functions below returns false if fixed buffer was overflowed */ ft_inline bool ft_strbuf_may (ft_strbuf_t *buf); 
ft_inline bool ft_strbuf_cat (ft_strbuf_t *buf, ft_str_t s); +ft_inline bool ft_strbuf_catbytes(ft_strbuf_t *buf, ft_bytes_t b); ft_inline bool ft_strbuf_cat1 (ft_strbuf_t *buf, char c); ft_inline bool ft_strbuf_cat2 (ft_strbuf_t *buf, char c1, char c2); ft_inline bool ft_strbuf_catc (ft_strbuf_t *buf, const char *s); diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index 0a252398e..8c07ff9de 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -426,18 +426,24 @@ ft_strbuf_ensure(ft_strbuf_t *buf, size_t n) { ft_inline bool ft_strbuf_cat(ft_strbuf_t *buf, ft_str_t s) { - if (!ft_strbuf_may(buf)) - return false; - if (s.len == 0) - return true; - if (!ft_strbuf_ensure(buf, s.len)) { - s.len = buf->cap - buf->len; - ft_assert(s.len > 0); - } - memmove(buf->ptr + buf->len, s.ptr, s.len); - buf->len += s.len; - buf->ptr[buf->len] = '\0'; - return ft_strbuf_may(buf); + /* we could actually reuse ft_strbuf_catbytes */ + return ft_strbuf_catbytes(buf, ft_bytes(s.ptr, s.len)); +} + +ft_inline bool +ft_strbuf_catbytes(ft_strbuf_t *buf, ft_bytes_t s) { + if (!ft_strbuf_may(buf)) + return false; + if (s.len == 0) + return true; + if (!ft_strbuf_ensure(buf, s.len)) { + s.len = buf->cap - buf->len; + ft_assert(s.len > 0); + } + memmove(buf->ptr + buf->len, s.ptr, s.len); + buf->len += s.len; + buf->ptr[buf->len] = '\0'; + return ft_strbuf_may(buf); } ft_inline bool diff --git a/src/utils/file.c b/src/utils/file.c index 91ed7e986..0c51c423b 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3742,6 +3742,26 @@ fio_communicate(int in, int out) } ft_bytes_free(&bytes); break; + case FIO_WRITE_FILE_AT_ONCE: + bytes = ft_bytes(buf, hdr.size); + ft_bytes_consume(&bytes, strlen(buf)+1); + err = $i(pioWriteFile, drive, .path = buf, + .content = bytes, .binary = hdr.arg); + if ($haserr(err)) + { + const char *msg = $errmsg(err); + hdr.arg = getErrno(err); + hdr.size = strlen(msg) + 1; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), 
sizeof(hdr)); + IO_CHECK(fio_write_all(out, msg, hdr.size), hdr.size); + } + else + { + hdr.arg = 0; + hdr.size = 0; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + } + break; case FIO_ACCESS: /* Check presence of file with specified name */ hdr.size = 0; hdr.arg = access(buf, hdr.arg) < 0 ? errno : 0; @@ -4194,19 +4214,21 @@ pioLocalDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) if (st.pst_kind != PIO_KIND_REGULAR) { *err = $err(RT, "File {path:q} is not regular: {kind}", path(path), - kind(pio_file_kind2str(st.pst_kind, path))); + kind(pio_file_kind2str(st.pst_kind, path)), + errNo(EACCES)); $iresult(*err); return res; } /* forbid too large file because of remote protocol */ - if (st.pst_size >= INT32_MAX) + if (st.pst_size >= PIO_READ_WRITE_FILE_LIMIT) { *err = $err(RT, "File {path:q} is too large: {size}", path(path), - size(st.pst_size), errNo(ENOMEM)); + size(st.pst_size), errNo(EFBIG)); $iresult(*err); return res; } + if (binary) res = ft_bytes_alloc(st.pst_size); else @@ -4256,6 +4278,60 @@ pioLocalDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) return res; } +static err_i +pioLocalDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) +{ + FOBJ_FUNC_ARP(); + Self(pioLocalDrive); + pioFile_i fl; + size_t amount; + err_i err; + + fobj_reset_err(&err); + + if (content.len > PIO_READ_WRITE_FILE_LIMIT) + { + err = $err(RT, "File content too large {path:q}: {size}", + path(path), size(content.len), errNo(EOVERFLOW)); + return $iresult(err); + } + + /* + * rely on "local file is read whole at once always". + * Is it true? + */ + fl = $(pioOpen, self, .path = path, + .flags = O_WRONLY | O_CREAT | O_TRUNC | (binary ? 
PG_BINARY : 0), + .err = &err); + if ($haserr(err)) + return $iresult(err); + + amount = $i(pioWrite, fl, .buf = content, .err = &err); + if ($haserr(err)) + return $iresult(err); + + if (amount != content.len) + { + err = $err(RT, "File {path:q} is truncated while reading", + path(path), errNo(EBUSY)); + $iresult(err); + return err; + } + + err = $i(pioWriteFinish, fl); + if ($haserr(err)) + return $iresult(err); + + err = $i(pioClose, fl); + if ($haserr(err)) + { + $(pioRemove, self, .path = path); + return $iresult(err); + } + + return $noerr(); +} + /* LOCAL FILE */ static void pioLocalFile_fobjDispose(VSelf) @@ -4673,7 +4749,7 @@ static ft_bytes_t pioRemoteDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) { FOBJ_FUNC_ARP(); - Self(pioLocalDrive); + Self(pioRemoteDrive); ft_bytes_t res; fobj_reset_err(err); @@ -4706,6 +4782,57 @@ pioRemoteDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) return res; } +static err_i +pioRemoteDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) +{ + FOBJ_FUNC_ARP(); + Self(pioLocalDrive); + fio_header hdr; + ft_bytes_t msg; + err_i err; + ft_strbuf_t buf = ft_strbuf_zero(); + + fobj_reset_err(&err); + + if (content.len > PIO_READ_WRITE_FILE_LIMIT) + { + err = $err(RT, "File content too large {path:q}: {size}", + path(path), size(content.len), errNo(EOVERFLOW)); + return $iresult(err); + } + + ft_strbuf_catc(&buf, path); + ft_strbuf_cat1(&buf, '\x00'); + ft_strbuf_catbytes(&buf, content); + + hdr = (fio_header){ + .cop = FIO_WRITE_FILE_AT_ONCE, + .handle = -1, + .size = buf.len, + .arg = binary, + }; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len), buf.len); + + ft_strbuf_free(&buf); + + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + ft_dbg_assert(hdr.cop == FIO_WRITE_FILE_AT_ONCE); + + if (hdr.arg != 0) + { + msg = ft_bytes_alloc(hdr.size); + IO_CHECK(fio_read_all(fio_stdin, msg.ptr, 
hdr.size), hdr.size); + err = $syserr((int)hdr.arg, "Could not write remote file {path:q}: {causeStr}", + path(path), causeStr(msg.ptr)); + ft_bytes_free(&msg); + return $iresult(err); + } + + return $noerr(); +} + /* REMOTE FILE */ static err_i diff --git a/src/utils/file.h b/src/utils/file.h index 5e7254348..315dbb54b 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -70,6 +70,7 @@ typedef enum FIO_PAGE_ZERO, FIO_FILES_ARE_SAME, FIO_READ_FILE_AT_ONCE, + FIO_WRITE_FILE_AT_ONCE, } fio_operations; typedef struct @@ -278,9 +279,13 @@ fobj_iface(pioReadCloser); (bool, handle_tablespaces), (bool, symlink_and_hidden), \ (bool, backup_logs), (bool, skip_hidden), (int, external_dir_num) #define mth__pioRemoveDir void, (const char *, root), (bool, root_as_well) +/* pioReadFile and pioWriteFile should be used only for small files */ +#define PIO_READ_WRITE_FILE_LIMIT (1024*1024) #define mth__pioReadFile ft_bytes_t, (path_t, path), (bool, binary), \ (err_i *, err) #define mth__pioReadFile__optional() (binary, true) +#define mth__pioWriteFile err_i, (path_t, path), (ft_bytes_t, content), (bool, binary) +#define mth__pioWriteFile__optional() (binary, true) fobj_method(pioOpen); fobj_method(pioStat); @@ -294,11 +299,12 @@ fobj_method(pioFilesAreSame); fobj_method(pioListDir); fobj_method(pioRemoveDir); fobj_method(pioReadFile); +fobj_method(pioWriteFile); #define iface__pioDrive mth(pioOpen, pioStat, pioRemove, pioRename), \ mth(pioExists, pioGetCRC32, pioIsRemote), \ mth(pioMakeDir, pioListDir, pioRemoveDir), \ - mth(pioFilesAreSame), mth(pioReadFile) + mth(pioFilesAreSame, pioReadFile, pioWriteFile) fobj_iface(pioDrive); extern pioDrive_i pioDriveForLocation(fio_location location); From 8b1111fca5d3eb17c8aba0d5f482d306670e6677 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 28 Nov 2022 06:16:45 +0300 Subject: [PATCH 102/339] [PBCKP-368] move dire_list_file to file.c just refactoring because dir_list_file is not used in other places anymore. 
--- src/dir.c | 190 -------------------------------------------- src/pg_probackup.h | 3 - src/utils/file.c | 194 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 194 insertions(+), 193 deletions(-) diff --git a/src/dir.c b/src/dir.c index a5462d38b..29003d880 100644 --- a/src/dir.c +++ b/src/dir.c @@ -16,7 +16,6 @@ #if PG_VERSION_NUM < 110000 #include "catalog/catalog.h" #endif -#include "catalog/pg_tablespace.h" #include #include @@ -125,9 +124,6 @@ typedef struct exclude_cb_ctx { static char dir_check_file(pgFile *file, bool backup_logs); -static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, - bool handle_tablespaces, bool follow_symlink, bool backup_logs, - bool skip_hidden, int external_dir_num, fio_location location); static void opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, const char *type); static void cleanup_tablespace(const char *path); @@ -371,47 +367,6 @@ db_map_entry_free(void *entry) free(entry); } -/* - * List files, symbolic links and directories in the directory "root" and add - * pgFile objects to "files". We add "root" to "files" if add_root is true. - * - * When follow_symlink is true, symbolic link is ignored and only file or - * directory linked to will be listed. 
- * - * TODO: make it strictly local - */ -void -dir_list_file(parray *files, const char *root, bool handle_tablespaces, bool follow_symlink, - bool backup_logs, bool skip_hidden, int external_dir_num, fio_location location) -{ - pgFile *file; - - file = pgFileNew(root, "", follow_symlink, external_dir_num, location); - if (file == NULL) - { - /* For external directory this is not ok */ - if (external_dir_num > 0) - elog(ERROR, "External directory is not found: \"%s\"", root); - else - return; - } - - if (file->kind != PIO_KIND_DIRECTORY) - { - if (external_dir_num > 0) - elog(ERROR, " --external-dirs option \"%s\": directory or symbolic link expected", - root); - else - elog(WARNING, "Skip \"%s\": unexpected file format", root); - return; - } - - dir_list_file_internal(files, file, root, handle_tablespaces, follow_symlink, - backup_logs, skip_hidden, external_dir_num, location); - - pgFileFree(file); -} - #define CHECK_FALSE 0 #define CHECK_TRUE 1 #define CHECK_EXCLUDE_FALSE 2 @@ -591,151 +546,6 @@ void exclude_files(parray *files, bool backup_logs) { parray_remove_if(files, exclude_files_cb, (void*)&ctx, pgFileFree); } -/* - * List files in parent->path directory. - * If "handle_tablespaces" is true, handle recursive tablespaces - * and the ones located inside pgdata. - * If "follow_symlink" is true, follow symlinks so that the - * fio_stat call fetches the info from the file pointed to by the - * symlink, not from the symlink itself. - * - * TODO: should we check for interrupt here ? 
- */ -static void -dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, - bool handle_tablespaces, bool follow_symlink, bool backup_logs, - bool skip_hidden, int external_dir_num, fio_location location) -{ - DIR *dir; - struct dirent *dent; - bool in_tablespace = false; - - if (parent->kind != PIO_KIND_DIRECTORY) - elog(ERROR, "\"%s\" is not a directory", parent_dir); - - in_tablespace = path_is_prefix_of_path(PG_TBLSPC_DIR, parent->rel_path); - - /* Open directory and list contents */ - dir = fio_opendir(location, parent_dir); - if (dir == NULL) - { - if (errno == ENOENT) - { - /* Maybe the directory was removed */ - return; - } - elog(ERROR, "Cannot open directory \"%s\": %s", - parent_dir, strerror(errno)); - } - - errno = 0; - while ((dent = fio_readdir(dir))) - { - pgFile *file; - char child[MAXPGPATH]; - char rel_child[MAXPGPATH]; - - join_path_components(child, parent_dir, dent->d_name); - join_path_components(rel_child, parent->rel_path, dent->d_name); - - file = pgFileNew(child, rel_child, follow_symlink, - external_dir_num, location); - if (file == NULL) - continue; - - /* Skip entries point current dir or parent dir */ - if (file->kind == PIO_KIND_DIRECTORY && - (strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0)) - { - pgFileFree(file); - continue; - } - - /* skip hidden files and directories */ - if (skip_hidden && file->name[0] == '.') { - elog(WARNING, "Skip hidden file: '%s'", child); - pgFileFree(file); - continue; - } - - /* - * Add only files, directories and links. Skip sockets and other - * unexpected file formats. - */ - if (file->kind != PIO_KIND_DIRECTORY && file->kind != PIO_KIND_REGULAR) - { - elog(WARNING, "Skip '%s': unexpected file format", child); - pgFileFree(file); - continue; - } - - if(handle_tablespaces) { - /* - * Do not copy tablespaces twice. It may happen if the tablespace is located - * inside the PGDATA. 
- */ - if (file->kind == PIO_KIND_DIRECTORY && - strcmp(file->name, TABLESPACE_VERSION_DIRECTORY) == 0) - { - Oid tblspcOid; - char tmp_rel_path[MAXPGPATH]; - int sscanf_res; - - /* - * Valid path for the tablespace is - * pg_tblspc/tblsOid/TABLESPACE_VERSION_DIRECTORY - */ - if (!path_is_prefix_of_path(PG_TBLSPC_DIR, file->rel_path)) - continue; - sscanf_res = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%s", - &tblspcOid, tmp_rel_path); - if (sscanf_res == 0) - continue; - } - - if (in_tablespace) { - char tmp_rel_path[MAXPGPATH]; - ssize_t sscanf_res; - - sscanf_res = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%[^/]/%u/", - &(file->tblspcOid), tmp_rel_path, - &(file->dbOid)); - - /* - * We should skip other files and directories rather than - * TABLESPACE_VERSION_DIRECTORY, if this is recursive tablespace. - */ - if (sscanf_res == 2 && strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) != 0) - continue; - } else if (path_is_prefix_of_path("global", file->rel_path)) { - file->tblspcOid = GLOBALTABLESPACE_OID; - } else if (path_is_prefix_of_path("base", file->rel_path)) { - file->tblspcOid = DEFAULTTABLESPACE_OID; - sscanf(file->rel_path, "base/%u/", &(file->dbOid)); - } - } - - parray_append(files, file); - - /* - * If the entry is a directory call dir_list_file_internal() - * recursively. - */ - if (file->kind == PIO_KIND_DIRECTORY) - dir_list_file_internal(files, file, child, handle_tablespaces, follow_symlink, - backup_logs, skip_hidden, external_dir_num, location); - } - - if (errno && errno != ENOENT) - { - int errno_tmp = errno; - fio_closedir(dir); - elog(ERROR, "Cannot read directory \"%s\": %s", - parent_dir, strerror(errno_tmp)); - } - fio_closedir(dir); -} - /* * Retrieve tablespace path, either relocated or original depending on whether * -T was passed or not. 
diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 73efb5d8a..7df0d04e7 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -995,9 +995,6 @@ extern const char* deparse_compress_alg(int alg); extern bool get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory); extern bool get_control_value_str(const char *str, const char *name, char *value_str, size_t value_str_size, bool is_mandatory); -extern void dir_list_file(parray *files, const char *root, bool handle_tablespaces, - bool follow_symlink, bool backup_logs, bool skip_hidden, - int external_dir_num, fio_location location); extern const char *get_tablespace_mapping(const char *dir); extern void create_data_directories(parray *dest_files, diff --git a/src/utils/file.c b/src/utils/file.c index 0c51c423b..4d4bae888 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5,6 +5,7 @@ #include #include "file.h" +#include "catalog/pg_tablespace.h" #include "storage/checksum.h" #define PRINTF_BUF_SIZE 1024 @@ -90,6 +91,13 @@ typedef struct #undef fopen #endif +static void dir_list_file(parray *files, const char *root, bool handle_tablespaces, + bool follow_symlink, bool backup_logs, bool skip_hidden, + int external_dir_num, fio_location location); +static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, + bool handle_tablespaces, bool follow_symlink, bool backup_logs, + bool skip_hidden, int external_dir_num, fio_location location); + void setMyLocation(ProbackupSubcmd const subcmd) { @@ -3271,6 +3279,192 @@ fio_remove_dir_impl(int out, char* buf) { IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } +/* + * List files, symbolic links and directories in the directory "root" and add + * pgFile objects to "files". We add "root" to "files" if add_root is true. + * + * When follow_symlink is true, symbolic link is ignored and only file or + * directory linked to will be listed. 
+ * + * TODO: make it strictly local + */ +static void +dir_list_file(parray *files, const char *root, bool handle_tablespaces, bool follow_symlink, + bool backup_logs, bool skip_hidden, int external_dir_num, fio_location location) +{ + pgFile *file; + + file = pgFileNew(root, "", follow_symlink, external_dir_num, location); + if (file == NULL) + { + /* For external directory this is not ok */ + if (external_dir_num > 0) + elog(ERROR, "External directory is not found: \"%s\"", root); + else + return; + } + + if (file->kind != PIO_KIND_DIRECTORY) + { + if (external_dir_num > 0) + elog(ERROR, " --external-dirs option \"%s\": directory or symbolic link expected", + root); + else + elog(WARNING, "Skip \"%s\": unexpected file format", root); + return; + } + + dir_list_file_internal(files, file, root, handle_tablespaces, follow_symlink, + backup_logs, skip_hidden, external_dir_num, location); + + pgFileFree(file); +} + +/* + * List files in parent->path directory. + * If "handle_tablespaces" is true, handle recursive tablespaces + * and the ones located inside pgdata. + * If "follow_symlink" is true, follow symlinks so that the + * fio_stat call fetches the info from the file pointed to by the + * symlink, not from the symlink itself. + * + * TODO: should we check for interrupt here ? 
+ */ +static void +dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, + bool handle_tablespaces, bool follow_symlink, bool backup_logs, + bool skip_hidden, int external_dir_num, fio_location location) +{ + DIR *dir; + struct dirent *dent; + bool in_tablespace = false; + + if (parent->kind != PIO_KIND_DIRECTORY) + elog(ERROR, "\"%s\" is not a directory", parent_dir); + + in_tablespace = path_is_prefix_of_path(PG_TBLSPC_DIR, parent->rel_path); + + /* Open directory and list contents */ + dir = fio_opendir(location, parent_dir); + if (dir == NULL) + { + if (errno == ENOENT) + { + /* Maybe the directory was removed */ + return; + } + elog(ERROR, "Cannot open directory \"%s\": %s", + parent_dir, strerror(errno)); + } + + errno = 0; + while ((dent = fio_readdir(dir))) + { + pgFile *file; + char child[MAXPGPATH]; + char rel_child[MAXPGPATH]; + + join_path_components(child, parent_dir, dent->d_name); + join_path_components(rel_child, parent->rel_path, dent->d_name); + + file = pgFileNew(child, rel_child, follow_symlink, + external_dir_num, location); + if (file == NULL) + continue; + + /* Skip entries point current dir or parent dir */ + if (file->kind == PIO_KIND_DIRECTORY && + (strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0)) + { + pgFileFree(file); + continue; + } + + /* skip hidden files and directories */ + if (skip_hidden && file->name[0] == '.') { + elog(WARNING, "Skip hidden file: '%s'", child); + pgFileFree(file); + continue; + } + + /* + * Add only files, directories and links. Skip sockets and other + * unexpected file formats. + */ + if (file->kind != PIO_KIND_DIRECTORY && file->kind != PIO_KIND_REGULAR) + { + elog(WARNING, "Skip '%s': unexpected file format", child); + pgFileFree(file); + continue; + } + + if(handle_tablespaces) { + /* + * Do not copy tablespaces twice. It may happen if the tablespace is located + * inside the PGDATA. 
+ */ + if (file->kind == PIO_KIND_DIRECTORY && + strcmp(file->name, TABLESPACE_VERSION_DIRECTORY) == 0) + { + Oid tblspcOid; + char tmp_rel_path[MAXPGPATH]; + int sscanf_res; + + /* + * Valid path for the tablespace is + * pg_tblspc/tblsOid/TABLESPACE_VERSION_DIRECTORY + */ + if (!path_is_prefix_of_path(PG_TBLSPC_DIR, file->rel_path)) + continue; + sscanf_res = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%s", + &tblspcOid, tmp_rel_path); + if (sscanf_res == 0) + continue; + } + + if (in_tablespace) { + char tmp_rel_path[MAXPGPATH]; + ssize_t sscanf_res; + + sscanf_res = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%[^/]/%u/", + &(file->tblspcOid), tmp_rel_path, + &(file->dbOid)); + + /* + * We should skip other files and directories rather than + * TABLESPACE_VERSION_DIRECTORY, if this is recursive tablespace. + */ + if (sscanf_res == 2 && strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) != 0) + continue; + } else if (path_is_prefix_of_path("global", file->rel_path)) { + file->tblspcOid = GLOBALTABLESPACE_OID; + } else if (path_is_prefix_of_path("base", file->rel_path)) { + file->tblspcOid = DEFAULTTABLESPACE_OID; + sscanf(file->rel_path, "base/%u/", &(file->dbOid)); + } + } + + parray_append(files, file); + + /* + * If the entry is a directory call dir_list_file_internal() + * recursively. + */ + if (file->kind == PIO_KIND_DIRECTORY) + dir_list_file_internal(files, file, child, handle_tablespaces, follow_symlink, + backup_logs, skip_hidden, external_dir_num, location); + } + + if (errno && errno != ENOENT) + { + int errno_tmp = errno; + fio_closedir(dir); + elog(ERROR, "Cannot read directory \"%s\": %s", + parent_dir, strerror(errno_tmp)); + } + fio_closedir(dir); +} + /* * To get the arrays of files we use the same function dir_list_file(), * that is used for local backup. 
From 646fe9e083d93713725f34dd18fee5ddc9bfb197 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 28 Nov 2022 06:44:14 +0300 Subject: [PATCH 103/339] [PBCKP-368] make pgFileNew accept drive instead of location --- src/backup.c | 4 ++-- src/dir.c | 10 +++++----- src/pg_probackup.h | 2 +- src/stream.c | 6 ++++-- src/utils/file.c | 39 ++++++++++++++++++++++----------------- 5 files changed, 34 insertions(+), 27 deletions(-) diff --git a/src/backup.c b/src/backup.c index 3bd6c8c13..02201338f 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1815,6 +1815,7 @@ pg_stop_backup_write_file_helper(const char *path, const char *filename, const c FILE *fp; pgFile *file; char full_filename[MAXPGPATH]; + pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); join_path_components(full_filename, path, filename); fp = fio_fopen(FIO_BACKUP_HOST, full_filename, PG_BINARY_W); @@ -1834,8 +1835,7 @@ pg_stop_backup_write_file_helper(const char *path, const char *filename, const c */ if (file_list) { - file = pgFileNew(full_filename, filename, true, 0, - FIO_BACKUP_HOST); + file = pgFileNew(full_filename, filename, true, 0, drive); if (file->kind == PIO_KIND_REGULAR) { diff --git a/src/dir.c b/src/dir.c index 29003d880..070499bb0 100644 --- a/src/dir.c +++ b/src/dir.c @@ -139,7 +139,7 @@ static TablespaceList external_remap_list = {NULL, NULL}; pgFile * pgFileNew(const char *path, const char *rel_path, bool follow_symlink, - int external_dir_num, fio_location location) + int external_dir_num, pioDrive_i drive) { FOBJ_FUNC_ARP(); pio_stat_t st; @@ -147,8 +147,8 @@ pgFileNew(const char *path, const char *rel_path, bool follow_symlink, err_i err; /* stat the file */ - st = $i(pioStat, pioDriveForLocation(location), .path = path, - .follow_symlink = follow_symlink, .err = &err); + st = $i(pioStat, drive, .path = path, .follow_symlink = follow_symlink, + .err = &err); if ($haserr(err)) { /* file not found is not an error case */ if (getErrno(err) == ENOENT) @@ -1450,6 +1450,7 @@ 
write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_ pgFile *file; char database_dir[MAXPGPATH]; char database_map_path[MAXPGPATH]; + pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); join_path_components(database_dir, backup->root_dir, DATABASE_DIR); join_path_components(database_map_path, database_dir, DATABASE_MAP); @@ -1470,8 +1471,7 @@ write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_ } /* Add metadata to backup_content.control */ - file = pgFileNew(database_map_path, DATABASE_MAP, true, 0, - FIO_BACKUP_HOST); + file = pgFileNew(database_map_path, DATABASE_MAP, true, 0, drive); file->crc = pgFileGetCRC32C(database_map_path, false); file->write_size = file->size; file->uncompressed_size = file->size; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 7df0d04e7..59ffd53db 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1032,7 +1032,7 @@ extern bool fileExists(const char *path, fio_location location); extern pgFile *pgFileNew(const char *path, const char *rel_path, bool follow_symlink, int external_dir_num, - fio_location location); + pioDrive_i drive); extern pgFile *pgFileInit(const char *rel_path); extern void pgFileFree(void *file); diff --git a/src/stream.c b/src/stream.c index df4606c0a..05c60d204 100644 --- a/src/stream.c +++ b/src/stream.c @@ -666,6 +666,7 @@ add_walsegment_to_filelist(parray *filelist, uint32 timeline, XLogRecPtr xlogpos char wal_segment_fullpath[MAXPGPATH]; pgFile *file = NULL; pgFile **existing_file = NULL; + pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); GetXLogSegNo(xlogpos, xlog_segno, xlog_seg_size); @@ -683,7 +684,7 @@ add_walsegment_to_filelist(parray *filelist, uint32 timeline, XLogRecPtr xlogpos join_path_components(wal_segment_fullpath, basedir, wal_segment_name); join_path_components(wal_segment_relpath, PG_XLOG_DIR, wal_segment_name); - file = pgFileNew(wal_segment_fullpath, wal_segment_relpath, false, 0, FIO_BACKUP_HOST); + file 
= pgFileNew(wal_segment_fullpath, wal_segment_relpath, false, 0, drive); /* * Check if file is already in the list @@ -722,6 +723,7 @@ add_history_file_to_filelist(parray *filelist, uint32 timeline, char *basedir) char fullpath[MAXPGPATH]; char relpath[MAXPGPATH]; pgFile *file = NULL; + pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); /* Timeline 1 does not have a history file */ if (timeline == 1) @@ -731,7 +733,7 @@ add_history_file_to_filelist(parray *filelist, uint32 timeline, char *basedir) join_path_components(fullpath, basedir, filename); join_path_components(relpath, PG_XLOG_DIR, filename); - file = pgFileNew(fullpath, relpath, false, 0, FIO_BACKUP_HOST); + file = pgFileNew(fullpath, relpath, false, 0, drive); /* calculate crc */ if (do_crc) diff --git a/src/utils/file.c b/src/utils/file.c index 4d4bae888..f6447aa66 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -93,10 +93,10 @@ typedef struct static void dir_list_file(parray *files, const char *root, bool handle_tablespaces, bool follow_symlink, bool backup_logs, bool skip_hidden, - int external_dir_num, fio_location location); + int external_dir_num, pioDrive_i drive); static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, bool handle_tablespaces, bool follow_symlink, bool backup_logs, - bool skip_hidden, int external_dir_num, fio_location location); + bool skip_hidden, int external_dir_num, pioDrive_i drive); void setMyLocation(ProbackupSubcmd const subcmd) @@ -3290,11 +3290,13 @@ fio_remove_dir_impl(int out, char* buf) { */ static void dir_list_file(parray *files, const char *root, bool handle_tablespaces, bool follow_symlink, - bool backup_logs, bool skip_hidden, int external_dir_num, fio_location location) + bool backup_logs, bool skip_hidden, int external_dir_num, pioDrive_i drive) { pgFile *file; - file = pgFileNew(root, "", follow_symlink, external_dir_num, location); + Assert(!$i(pioIsRemote, drive)); + + file = pgFileNew(root, "", 
follow_symlink, external_dir_num, drive); if (file == NULL) { /* For external directory this is not ok */ @@ -3315,7 +3317,7 @@ dir_list_file(parray *files, const char *root, bool handle_tablespaces, bool fol } dir_list_file_internal(files, file, root, handle_tablespaces, follow_symlink, - backup_logs, skip_hidden, external_dir_num, location); + backup_logs, skip_hidden, external_dir_num, drive); pgFileFree(file); } @@ -3333,19 +3335,21 @@ dir_list_file(parray *files, const char *root, bool handle_tablespaces, bool fol static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, bool handle_tablespaces, bool follow_symlink, bool backup_logs, - bool skip_hidden, int external_dir_num, fio_location location) + bool skip_hidden, int external_dir_num, pioDrive_i drive) { DIR *dir; struct dirent *dent; bool in_tablespace = false; + Assert(!$i(pioIsRemote, drive)); + if (parent->kind != PIO_KIND_DIRECTORY) elog(ERROR, "\"%s\" is not a directory", parent_dir); in_tablespace = path_is_prefix_of_path(PG_TBLSPC_DIR, parent->rel_path); /* Open directory and list contents */ - dir = fio_opendir(location, parent_dir); + dir = opendir(parent_dir); if (dir == NULL) { if (errno == ENOENT) @@ -3358,7 +3362,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, } errno = 0; - while ((dent = fio_readdir(dir))) + while ((dent = readdir(dir))) { pgFile *file; char child[MAXPGPATH]; @@ -3368,7 +3372,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, join_path_components(rel_child, parent->rel_path, dent->d_name); file = pgFileNew(child, rel_child, follow_symlink, - external_dir_num, location); + external_dir_num, drive); if (file == NULL) continue; @@ -3452,17 +3456,17 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, */ if (file->kind == PIO_KIND_DIRECTORY) dir_list_file_internal(files, file, child, handle_tablespaces, follow_symlink, - backup_logs, skip_hidden, 
external_dir_num, location); + backup_logs, skip_hidden, external_dir_num, drive); } if (errno && errno != ENOENT) { int errno_tmp = errno; - fio_closedir(dir); + closedir(dir); elog(ERROR, "Cannot read directory \"%s\": %s", parent_dir, strerror(errno_tmp)); } - fio_closedir(dir); + closedir(dir); } /* @@ -3477,7 +3481,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, * TODO: replace FIO_SEND_FILE and FIO_SEND_FILE_EOF with dedicated messages */ static void -fio_list_dir_impl(int out, char* buf) +fio_list_dir_impl(int out, char* buf, pioDrive_i drive) { int i; fio_header hdr; @@ -3494,7 +3498,7 @@ fio_list_dir_impl(int out, char* buf) dir_list_file(file_files, req->path, req->handle_tablespaces, req->follow_symlink, req->backup_logs, req->skip_hidden, - req->external_dir_num, FIO_LOCAL_HOST); + req->external_dir_num, drive); /* send information about files to the main process */ for (i = 0; i < parray_num(file_files); i++) @@ -3984,7 +3988,7 @@ fio_communicate(int in, int out) SYS_CHECK(ftruncate(fd[hdr.handle], hdr.arg)); break; case FIO_LIST_DIR: - fio_list_dir_impl(out, buf); + fio_list_dir_impl(out, buf, drive); break; case FIO_REMOVE_DIR: fio_remove_dir_impl(out, buf); @@ -4343,8 +4347,9 @@ pioLocalDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_tab bool follow_symlink, bool backup_logs, bool skip_hidden, int external_dir_num) { FOBJ_FUNC_ARP(); + Self(pioLocalDrive); dir_list_file(files, root, handle_tablespaces, follow_symlink, backup_logs, - skip_hidden, external_dir_num, FIO_LOCAL_HOST); + skip_hidden, external_dir_num, $bind(pioDrive, self)); } static void @@ -4360,7 +4365,7 @@ pioLocalDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { // adding the root directory because it must be deleted too if(root_as_well) - parray_append(files, pgFileNew(root, "", false, 0, FIO_LOCAL_HOST)); + parray_append(files, pgFileNew(root, "", false, 0, $bind(pioDrive, self))); /* delete leaf node first */ 
parray_qsort(files, pgFileCompareRelPathWithExternalDesc); From 1e954a396b0664a49c8d9668ad064bb2d734f1ed Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 28 Nov 2022 08:06:47 +0300 Subject: [PATCH 104/339] [PBCKP-368] pg_stop_backup_write_file_helper to use pioWriteFile --- src/backup.c | 56 +++++++++++++------------------------- src/catchup.c | 18 +++++------- src/fu_util/ft_util.h | 7 +++++ src/fu_util/impl/fo_impl.c | 2 +- src/fu_util/impl/ft_impl.h | 14 +++++++++- src/pg_probackup.h | 10 +++---- 6 files changed, 51 insertions(+), 56 deletions(-) diff --git a/src/backup.c b/src/backup.c index 02201338f..501aba2a7 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1789,45 +1789,31 @@ pg_stop_backup_consume(PGconn *conn, int server_version, } /* get backup_label_content */ - result->backup_label_content = NULL; // if (!PQgetisnull(query_result, 0, backup_label_colno)) - result->backup_label_content_len = PQgetlength(query_result, 0, backup_label_colno); - if (result->backup_label_content_len > 0) - result->backup_label_content = pgut_strndup(PQgetvalue(query_result, 0, backup_label_colno), - result->backup_label_content_len); + result->backup_label_content = ft_strdupc(PQgetvalue(query_result, 0, backup_label_colno)); /* get tablespace_map_content */ - result->tablespace_map_content = NULL; // if (!PQgetisnull(query_result, 0, tablespace_map_colno)) - result->tablespace_map_content_len = PQgetlength(query_result, 0, tablespace_map_colno); - if (result->tablespace_map_content_len > 0) - result->tablespace_map_content = pgut_strndup(PQgetvalue(query_result, 0, tablespace_map_colno), - result->tablespace_map_content_len); + result->tablespace_map_content = ft_strdupc(PQgetvalue(query_result, 0, tablespace_map_colno)); } /* * helper routine used to write backup_label and tablespace_map in pg_stop_backup() */ void -pg_stop_backup_write_file_helper(const char *path, const char *filename, const char *error_msg_filename, - const void *data, size_t len, parray 
*file_list) +pg_stop_backup_write_file_helper(pioDrive_i drive, const char *path, const char *filename, const char *error_msg_filename, + ft_str_t data, parray *file_list) { - FILE *fp; pgFile *file; char full_filename[MAXPGPATH]; - pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); + err_i err = $noerr(); join_path_components(full_filename, path, filename); - fp = fio_fopen(FIO_BACKUP_HOST, full_filename, PG_BINARY_W); - if (fp == NULL) - elog(ERROR, "can't open %s file \"%s\": %s", - error_msg_filename, full_filename, strerror(errno)); - if (fio_fwrite(fp, data, len) != len || - fio_fflush(fp) != 0 || - fio_fclose(fp)) - elog(ERROR, "can't write %s file \"%s\": %s", - error_msg_filename, full_filename, strerror(errno)); + err = $i(pioWriteFile, drive, .path = full_filename, + .content = ft_str2bytes(data)); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "writting stop backup file"); /* * It's vital to check if files_list is initialized, @@ -1894,26 +1880,22 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb wait_wal_and_calculate_stop_lsn(xlog_path, stop_backup_result.lsn, backup); /* Write backup_label and tablespace_map */ - Assert(stop_backup_result.backup_label_content != NULL); + Assert(stop_backup_result.backup_label_content.len != 0); /* Write backup_label */ - pg_stop_backup_write_file_helper(backup->database_dir, PG_BACKUP_LABEL_FILE, "backup label", - stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, - backup_files_list); - free(stop_backup_result.backup_label_content); - stop_backup_result.backup_label_content = NULL; - stop_backup_result.backup_label_content_len = 0; + pg_stop_backup_write_file_helper(backup->backup_location, + backup->database_dir, PG_BACKUP_LABEL_FILE, "backup label", + stop_backup_result.backup_label_content, backup_files_list); + ft_str_free(&stop_backup_result.backup_label_content); /* Write tablespace_map */ - if 
(stop_backup_result.tablespace_map_content != NULL) + if (stop_backup_result.tablespace_map_content.len != 0) { - pg_stop_backup_write_file_helper(backup->database_dir, PG_TABLESPACE_MAP_FILE, "tablespace map", - stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, - backup_files_list); - free(stop_backup_result.tablespace_map_content); - stop_backup_result.tablespace_map_content = NULL; - stop_backup_result.tablespace_map_content_len = 0; + pg_stop_backup_write_file_helper(backup->backup_location, + backup->database_dir, PG_TABLESPACE_MAP_FILE, "tablespace map", + stop_backup_result.tablespace_map_content, backup_files_list); } + ft_str_free(&stop_backup_result.tablespace_map_content); if (backup->stream) { diff --git a/src/catchup.c b/src/catchup.c index 038767470..5bb82ae77 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -1041,19 +1041,17 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); /* Write backup_label */ - Assert(stop_backup_result.backup_label_content != NULL); + Assert(stop_backup_result.backup_label_content.len != 0); if (!dry_run) { - pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label", - stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, - NULL); + pg_stop_backup_write_file_helper(local_location, + dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label", + stop_backup_result.backup_label_content, NULL); } - free(stop_backup_result.backup_label_content); - stop_backup_result.backup_label_content = NULL; - stop_backup_result.backup_label_content_len = 0; + ft_str_free(&stop_backup_result.backup_label_content); /* tablespace_map */ - if (stop_backup_result.tablespace_map_content != NULL) + if (stop_backup_result.tablespace_map_content.len != 0) { // TODO what if tablespace is created during catchup? 
/* Because we have already created symlinks in pg_tblspc earlier, @@ -1063,10 +1061,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, * stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, * NULL); */ - free(stop_backup_result.tablespace_map_content); - stop_backup_result.tablespace_map_content = NULL; - stop_backup_result.tablespace_map_content_len = 0; } + ft_str_free(&stop_backup_result.tablespace_map_content); /* wait for end of wal streaming and calculate wal size transfered */ if (!dry_run) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index d4bdf0978..3ca658879 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -357,7 +357,14 @@ ft_inline ft_str_t ft_cstr(const char* ptr) { return (ft_str_t){.ptr = (char*)ptr, .len = ptr ? strlen(ptr) : 0}; } +ft_inline ft_bytes_t ft_str2bytes(ft_str_t str) { + return ft_bytes(str.ptr, str.len); +} + ft_inline ft_str_t ft_strdup(ft_str_t str); +ft_inline ft_str_t ft_strdupc(const char* str); +/* use only if string was allocated */ +ft_inline void ft_str_free(ft_str_t *str); /* print string into ft_malloc-ed buffer */ extern ft_str_t ft_asprintf(const char *fmt, ...) 
ft_gnu_printf(1,2); diff --git a/src/fu_util/impl/fo_impl.c b/src/fu_util/impl/fo_impl.c index d1532b517..3547c1d7c 100644 --- a/src/fu_util/impl/fo_impl.c +++ b/src/fu_util/impl/fo_impl.c @@ -661,7 +661,7 @@ fobj_newstr(ft_str_t s, enum FOBJ_STR_ALLOC ownership) { str = fobj_reservestr(s.len); memcpy(fobj_getstr(str).ptr, s.ptr, s.len); if (ownership == FOBJ_STR_GIFTED) - ft_free(s.ptr); + ft_str_free(&s); return str; } diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index 8c07ff9de..a6b4197f8 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -332,7 +332,7 @@ ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src) { // String utils ft_inline char * ft_cstrdup(const char *str) { - return (char*)ft_strdup(ft_cstr(str)).ptr; + return (char*)ft_strdupc(str).ptr; } ft_inline ft_str_t @@ -346,6 +346,18 @@ ft_strdup(ft_str_t str) { return str; } +ft_inline ft_str_t +ft_strdupc(const char *str) { + return ft_strdup(ft_cstr(str)); +} + +ft_inline void +ft_str_free(ft_str_t *str) { + ft_free(str->ptr); + str->ptr = NULL; + str->len = 0; +} + ft_inline bool ft_streq(ft_str_t str, ft_str_t oth) { return str.len == oth.len && strncmp(str.ptr, oth.ptr, str.len) == 0; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 59ffd53db..4f20e91e6 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1247,10 +1247,8 @@ typedef struct PGStopBackupResult * Fields that store pg_catalog.pg_stop_backup() result */ XLogRecPtr lsn; - size_t backup_label_content_len; - char *backup_label_content; - size_t tablespace_map_content_len; - char *tablespace_map_content; + ft_str_t backup_label_content; + ft_str_t tablespace_map_content; } PGStopBackupResult; extern bool backup_in_progress; @@ -1265,8 +1263,8 @@ extern void pg_stop_backup_send(PGconn *conn, int server_version, bool is_starte extern void pg_stop_backup_consume(PGconn *conn, int server_version, uint32 timeout, const char *query_text, PGStopBackupResult *result); -extern void 
pg_stop_backup_write_file_helper(const char *path, const char *filename, const char *error_msg_filename, - const void *data, size_t len, parray *file_list); +extern void pg_stop_backup_write_file_helper(pioDrive_i drive, const char *path, const char *filename, const char *error_msg_filename, + ft_str_t data, parray *file_list); extern XLogRecPtr wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr lsn, bool is_start_lsn, TimeLineID tli, bool in_prev_segment, bool segment_only, int timeout_elevel, bool in_stream_dir); From 0833965e3a90ffe79ee91d6f0294c8a993539d5d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 28 Nov 2022 13:01:11 +0300 Subject: [PATCH 105/339] [PBCKP-368] use pioReadFile and pioWriteFile in update_recovery_options --- src/fu_util/ft_util.h | 21 ++++ src/fu_util/impl/ft_impl.h | 89 ++++++++++++++ src/restore.c | 235 ++++++++++++++++--------------------- src/utils/file.c | 11 ++ src/utils/file.h | 2 +- 5 files changed, 224 insertions(+), 134 deletions(-) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index 3ca658879..8f2beca54 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -329,6 +329,11 @@ ft_inline void ft_bytes_free(ft_bytes_t* bytes) { ft_inline void ft_bytes_consume(ft_bytes_t *bytes, size_t cut); ft_inline void ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src); +ft_inline ft_bytes_t ft_bytes_shift_line(ft_bytes_t *bytes); +ft_inline size_t ft_bytes_find_bytes(ft_bytes_t haystack, ft_bytes_t needle); +ft_inline size_t ft_bytes_find_cstr(ft_bytes_t haystack, const char *needle); +ft_inline bool ft_bytes_has_cstr(ft_bytes_t haystack, const char *needle); + // String utils extern size_t ft_strlcpy(char *dest, const char* src, size_t dest_size); /* @@ -361,6 +366,18 @@ ft_inline ft_bytes_t ft_str2bytes(ft_str_t str) { return ft_bytes(str.ptr, str.len); } +ft_inline ft_bytes_t ft_str2bytes_withzb(ft_str_t str) { + return ft_bytes(str.ptr, str.len+1); +} + +/* + * casts to string checking last byte is zero + * 
note: + * bytes != ft_str2bytes(ft_bytes2str(bytes)) + * bytes == ft_str2bytes_withzb(ft_bytes2str(bytes)) + */ +ft_inline ft_str_t ft_bytes2str(ft_bytes_t bytes); + ft_inline ft_str_t ft_strdup(ft_str_t str); ft_inline ft_str_t ft_strdupc(const char* str); /* use only if string was allocated */ @@ -375,6 +392,10 @@ ft_inline FT_CMP_RES ft_strcmp (ft_str_t str, ft_str_t oth); ft_inline bool ft_streqc (ft_str_t str, const char* oth); ft_inline FT_CMP_RES ft_strcmpc(ft_str_t str, const char* oth); +/* cuts first line from argument */ +ft_inline void ft_str_consume(ft_str_t *str, size_t cut); +ft_inline ft_bytes_t ft_str_shift_line(ft_str_t *str); + /* * String buffer. * It could be growable or fixed. diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index a6b4197f8..d1f2fde72 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -329,7 +329,70 @@ ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src) { ft_bytes_consume(src, len); } +ft_inline ft_bytes_t +ft_bytes_shift_line(ft_bytes_t *bytes) +{ + size_t i; + char *p = bytes->ptr; + + for (i = 0; i < bytes->len; i++) { + if (p[i] == '\r' || p[i] == '\n') { + if (p[i] == '\r' && i+1 < bytes->len && p[i+1] == '\n') + i++; + ft_bytes_consume(bytes, i+1); + return ft_bytes(p, i+1); + } + } + + ft_bytes_consume(bytes, bytes->len); + return ft_bytes(p, i); +} + +ft_inline size_t +ft_bytes_find_bytes(ft_bytes_t haystack, ft_bytes_t needle) +{ + // TODO use memmem if present + size_t i; + char first; + + if (needle.len == 0) + return 0; + if (needle.len > haystack.len) + return haystack.len; + + first = needle.ptr[0]; + for (i = 0; i < haystack.len - needle.len; i++) + { + if (haystack.ptr[i] != first) + continue; + if (memcmp(haystack.ptr + i, needle.ptr, needle.len) == 0) + return i; + } + + return haystack.len; +} + +ft_inline size_t +ft_bytes_find_cstr(ft_bytes_t haystack, const char* needle) +{ + return ft_bytes_find_bytes(haystack, ft_str2bytes(ft_cstr(needle))); +} + +ft_inline bool 
+ft_bytes_has_cstr(ft_bytes_t haystack, const char* needle) +{ + size_t pos = ft_bytes_find_cstr(haystack, needle); + return pos != haystack.len; +} + // String utils + +ft_inline ft_str_t +ft_bytes2str(ft_bytes_t bytes) { + ft_dbg_assert(bytes.ptr[bytes.len-1] == '\0'); + return ft_str(bytes.ptr, bytes.len-1); +} + ft_inline char * ft_cstrdup(const char *str) { return (char*)ft_strdupc(str).ptr; @@ -379,6 +442,32 @@ ft_strcmpc(ft_str_t str, const char* oth) { return ft_strcmp(str, ft_cstr(oth)); } +ft_inline void +ft_str_consume(ft_str_t *str, size_t cut) { + ft_dbg_assert(cut <= str->len); + str->ptr = str->ptr + cut; + str->len -= cut; +} + +ft_inline ft_bytes_t +ft_str_shift_line(ft_str_t *str) +{ + size_t i; + char *p = str->ptr; + + for (i = 0; i < str->len; i++) { + if (p[i] == '\r' || p[i] == '\n') { + if (p[i] == '\r' && p[i+1] == '\n') + i++; + ft_str_consume(str, i+1); + return ft_bytes(p, i+1); + } + } + + ft_str_consume(str, str->len); + return ft_bytes(p, i); +} + ft_inline ft_strbuf_t ft_strbuf_zero(void) { return (ft_strbuf_t){.ptr = "", .len = 0, .cap = 0}; diff --git a/src/restore.c b/src/restore.c index 5a517d4cf..5c6262895 100644 --- a/src/restore.c +++ b/src/restore.c @@ -40,10 +40,10 @@ typedef struct static void -print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup, +print_recovery_settings(ft_strbuf_t *buf, InstanceState *instanceState, pgBackup *backup, pgRestoreParams *params, pgRecoveryTarget *rt); static void -print_standby_settings_common(FILE *fp, pgBackup *backup, pgRestoreParams *params); +print_standby_settings_common(ft_strbuf_t *buf, pgBackup *backup, pgRestoreParams *params); #if PG_VERSION_NUM >= 120000 static void @@ -1374,20 +1374,25 @@ create_recovery_conf(InstanceState *instanceState, time_t backup_id, /* TODO get rid of using global variables: instance_config */ static void -print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup, +print_recovery_settings(ft_strbuf_t 
*buf, InstanceState *instanceState, pgBackup *backup, pgRestoreParams *params, pgRecoveryTarget *rt) { - char restore_command_guc[16384]; - fio_fprintf(fp, "## recovery settings\n"); + char restore_command_buf[1024]; + ft_strbuf_t restore_command_guc; + + restore_command_guc = ft_strbuf_init_stack(restore_command_buf, + sizeof(restore_command_buf)); + + ft_strbuf_catf(buf, "## recovery settings\n"); /* If restore_command is provided, use it. Otherwise construct it from scratch. */ if (instance_config.restore_command && (pg_strcasecmp(instance_config.restore_command, "none") != 0)) - sprintf(restore_command_guc, "%s", instance_config.restore_command); + ft_strbuf_catc(&restore_command_guc, instance_config.restore_command); else { /* default cmdline, ok for local restore */ - sprintf(restore_command_guc, "\"%s\" archive-get -B \"%s\" --instance \"%s\" " + ft_strbuf_catf(&restore_command_guc, "\"%s\" archive-get -B \"%s\" --instance \"%s\" " "--wal-file-path=%%p --wal-file-name=%%f", PROGRAM_FULL_PATH ? PROGRAM_FULL_PATH : PROGRAM_NAME, /* TODO What is going on here? Why do we use catalog path as wal-file-path? 
*/ @@ -1396,20 +1401,20 @@ print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup /* append --remote-* parameters provided via --archive-* settings */ if (instance_config.archive.host) { - strcat(restore_command_guc, " --remote-host="); - strcat(restore_command_guc, instance_config.archive.host); + ft_strbuf_catc(&restore_command_guc, " --remote-host="); + ft_strbuf_catc(&restore_command_guc, instance_config.archive.host); } if (instance_config.archive.port) { - strcat(restore_command_guc, " --remote-port="); - strcat(restore_command_guc, instance_config.archive.port); + ft_strbuf_catc(&restore_command_guc, " --remote-port="); + ft_strbuf_catc(&restore_command_guc, instance_config.archive.port); } if (instance_config.archive.user) { - strcat(restore_command_guc, " --remote-user="); - strcat(restore_command_guc, instance_config.archive.user); + ft_strbuf_catc(&restore_command_guc, " --remote-user="); + ft_strbuf_catc(&restore_command_guc, instance_config.archive.user); } } @@ -1418,26 +1423,26 @@ print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup * exclusive options is specified, so the order of calls is insignificant. 
*/ if (rt->target_name) - fio_fprintf(fp, "recovery_target_name = '%s'\n", rt->target_name); + ft_strbuf_catf(buf, "recovery_target_name = '%s'\n", rt->target_name); if (rt->time_string) - fio_fprintf(fp, "recovery_target_time = '%s'\n", rt->time_string); + ft_strbuf_catf(buf, "recovery_target_time = '%s'\n", rt->time_string); if (rt->xid_string) - fio_fprintf(fp, "recovery_target_xid = '%s'\n", rt->xid_string); + ft_strbuf_catf(buf, "recovery_target_xid = '%s'\n", rt->xid_string); if (rt->lsn_string) - fio_fprintf(fp, "recovery_target_lsn = '%s'\n", rt->lsn_string); + ft_strbuf_catf(buf, "recovery_target_lsn = '%s'\n", rt->lsn_string); if (rt->target_stop && (strcmp(rt->target_stop, "immediate") == 0)) - fio_fprintf(fp, "recovery_target = '%s'\n", rt->target_stop); + ft_strbuf_catf(buf, "recovery_target = '%s'\n", rt->target_stop); if (rt->inclusive_specified) - fio_fprintf(fp, "recovery_target_inclusive = '%s'\n", + ft_strbuf_catf(buf, "recovery_target_inclusive = '%s'\n", rt->target_inclusive ? "true" : "false"); if (rt->target_tli) - fio_fprintf(fp, "recovery_target_timeline = '%u'\n", rt->target_tli); + ft_strbuf_catf(buf, "recovery_target_timeline = '%u'\n", rt->target_tli); else { #if PG_VERSION_NUM >= 120000 @@ -1447,31 +1452,33 @@ print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup * is extremely risky. Explicitly preserve old behavior of recovering to current * timneline for PG12. 
*/ - fio_fprintf(fp, "recovery_target_timeline = 'current'\n"); + ft_strbuf_catf(buf, "recovery_target_timeline = 'current'\n"); #endif } if (rt->target_action) - fio_fprintf(fp, "recovery_target_action = '%s'\n", rt->target_action); + ft_strbuf_catf(buf, "recovery_target_action = '%s'\n", rt->target_action); else /* default recovery_target_action is 'pause' */ - fio_fprintf(fp, "recovery_target_action = '%s'\n", "pause"); + ft_strbuf_catf(buf, "recovery_target_action = '%s'\n", "pause"); + + elog(LOG, "Setting restore_command to '%s'", restore_command_guc.ptr); + ft_strbuf_catf(buf, "restore_command = '%s'\n", restore_command_guc.ptr); - elog(LOG, "Setting restore_command to '%s'", restore_command_guc); - fio_fprintf(fp, "restore_command = '%s'\n", restore_command_guc); + ft_strbuf_free(&restore_command_guc); } static void -print_standby_settings_common(FILE *fp, pgBackup *backup, pgRestoreParams *params) +print_standby_settings_common(ft_strbuf_t *buf, pgBackup *backup, pgRestoreParams *params) { - fio_fprintf(fp, "\n## standby settings\n"); + ft_strbuf_catf(buf, "\n## standby settings\n"); if (params->primary_conninfo) - fio_fprintf(fp, "primary_conninfo = '%s'\n", params->primary_conninfo); + ft_strbuf_catf(buf, "primary_conninfo = '%s'\n", params->primary_conninfo); else if (backup->primary_conninfo) - fio_fprintf(fp, "primary_conninfo = '%s'\n", backup->primary_conninfo); + ft_strbuf_catf(buf, "primary_conninfo = '%s'\n", backup->primary_conninfo); if (params->primary_slot_name != NULL) - fio_fprintf(fp, "primary_slot_name = '%s'\n", params->primary_slot_name); + ft_strbuf_catf(buf, "primary_slot_name = '%s'\n", params->primary_slot_name); } #if PG_VERSION_NUM < 120000 @@ -1479,8 +1486,9 @@ static void update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backup, pgRestoreParams *params, pgRecoveryTarget *rt) { - FILE *fp; char path[MAXPGPATH]; + ft_strbuf_t buf = ft_strbuf_zero(); + err_i err; /* * If PITR is not requested and instance 
is not restored as replica, @@ -1492,33 +1500,27 @@ update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backu return; } - elog(LOG, "update recovery settings in recovery.conf"); - join_path_components(path, instance_config.pgdata, "recovery.conf"); - - fp = fio_fopen(FIO_DB_HOST, path, "w"); - if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", path, - strerror(errno)); - - if (fio_chmod(FIO_DB_HOST, path, FILE_PERMISSION) == -1) - elog(ERROR, "Cannot change mode of \"%s\": %s", path, strerror(errno)); - - fio_fprintf(fp, "# recovery.conf generated by pg_probackup %s\n", + ft_strbuf_catf(&buf, "# recovery.conf generated by pg_probackup %s\n", PROGRAM_VERSION); if (params->recovery_settings_mode == PITR_REQUESTED) - print_recovery_settings(instanceState, fp, backup, params, rt); + print_recovery_settings(&buf, instanceState, backup, params, rt); if (params->restore_as_replica) { - print_standby_settings_common(fp, backup, params); - fio_fprintf(fp, "standby_mode = 'on'\n"); + print_standby_settings_common(&buf, backup, params); + ft_strbuf_catc(&buf, "standby_mode = 'on'\n"); } - if (fio_fflush(fp) != 0 || - fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", path, - strerror(errno)); + elog(LOG, "update recovery settings in recovery.conf"); + + join_path_components(path, instance_config.pgdata, "recovery.conf"); + + err = $i(pioWriteFile, backup->database_location, .path = path, + .content = ft_str2bytes(ft_strbuf_ref(&buf)), .binary = false); + + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "writting recovery settings"); } #endif @@ -1536,116 +1538,75 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, char postgres_auto_path[MAXPGPATH]; char postgres_auto_path_tmp[MAXPGPATH]; char path[MAXPGPATH]; - FILE *fp = NULL; FILE *fp_tmp = NULL; - pio_stat_t st; char current_time_str[100]; /* postgresql.auto.conf parsing */ - char line[16384] = "\0"; - char *buf = NULL; - int buf_len = 0; - int 
buf_len_max = 16384; + ft_bytes_t old_content; + ft_bytes_t parse; + ft_bytes_t line; + ft_bytes_t zero = ft_bytes(NULL, 0); + ft_strbuf_t content; err_i err; + content = ft_strbuf_zero(); + elog(LOG, "update recovery settings in postgresql.auto.conf"); time2iso(current_time_str, lengthof(current_time_str), current_time, false); join_path_components(postgres_auto_path, instance_config.pgdata, "postgresql.auto.conf"); - st = $i(pioStat, pioDriveForLocation(FIO_DB_HOST), - .path = postgres_auto_path, .follow_symlink = false, .err = &err); + old_content = $i(pioReadFile, backup->database_location, + .path = postgres_auto_path, .err = &err, .binary = false); + /* file not found is not an error case */ if ($haserr(err) && getErrno(err) != ENOENT) { ft_logerr(FT_FATAL, $errmsg(err), ""); } - /* Kludge for 0-sized postgresql.auto.conf file. TODO: make something more intelligent */ - if (st.pst_size > 0) - { - fp = fio_open_stream(FIO_DB_HOST, postgres_auto_path); - if (fp == NULL) - elog(ERROR, "cannot open \"%s\": %s", postgres_auto_path, strerror(errno)); - } - sprintf(postgres_auto_path_tmp, "%s.tmp", postgres_auto_path); fp_tmp = fio_fopen(FIO_DB_HOST, postgres_auto_path_tmp, "w"); if (fp_tmp == NULL) elog(ERROR, "cannot open \"%s\": %s", postgres_auto_path_tmp, strerror(errno)); - while (fp && fgets(line, lengthof(line), fp)) + parse = old_content; /* copy since ft_bytes_shift_line mutates bytes */ + /* chomp zero byte */ + ft_dbg_assert(parse.ptr[parse.len-1] == 0); + parse.len -= 1; + + while (parse.len > 0) { + line = ft_bytes_shift_line(&parse); /* ignore "include 'probackup_recovery.conf'" directive */ - if (strstr(line, "include") && - strstr(line, "probackup_recovery.conf")) + if (ft_bytes_has_cstr(line, "include") && + ft_bytes_has_cstr(line, "probackup_recovery.conf")) { continue; } /* ignore already existing recovery options */ - if (strstr(line, "restore_command") || - strstr(line, "recovery_target")) + if (ft_bytes_has_cstr(line, "restore_command") || + 
ft_bytes_has_cstr(line, "recovery_target")) { continue; } - if (!buf) - buf = pgut_malloc(buf_len_max); - - /* avoid buffer overflow */ - if ((buf_len + strlen(line)) >= buf_len_max) - { - buf_len_max += (buf_len + strlen(line)) *2; - buf = pgut_realloc(buf, buf_len_max); - } - - buf_len += snprintf(buf+buf_len, sizeof(line), "%s", line); + ft_strbuf_catbytes(&content, line); } - /* close input postgresql.auto.conf */ - if (fp) - fio_close_stream(fp); - - /* Write data to postgresql.auto.conf.tmp */ - if (buf_len > 0 && - (fio_fwrite(fp_tmp, buf, buf_len) != buf_len)) - elog(ERROR, "Cannot write to \"%s\": %s", - postgres_auto_path_tmp, strerror(errno)); - - if (fio_fflush(fp_tmp) != 0 || - fio_fclose(fp_tmp)) - elog(ERROR, "Cannot write file \"%s\": %s", postgres_auto_path_tmp, - strerror(errno)); - pg_free(buf); - - if (fio_rename(FIO_DB_HOST, postgres_auto_path_tmp, postgres_auto_path) < 0) - elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", - postgres_auto_path_tmp, postgres_auto_path, strerror(errno)); - - if (fio_chmod(FIO_DB_HOST, postgres_auto_path, FILE_PERMISSION) == -1) - elog(ERROR, "Cannot change mode of \"%s\": %s", postgres_auto_path, strerror(errno)); + ft_bytes_free(&old_content); if (params) { - fp = fio_fopen(FIO_DB_HOST, postgres_auto_path, "a"); - if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", postgres_auto_path, - strerror(errno)); - - fio_fprintf(fp, "\n# recovery settings added by pg_probackup restore of backup %s at '%s'\n", + ft_strbuf_catf(&content, "\n# recovery settings added by pg_probackup restore of backup %s at '%s'\n", backup_id_of(backup), current_time_str); if (params->recovery_settings_mode == PITR_REQUESTED) - print_recovery_settings(instanceState, fp, backup, params, rt); + print_recovery_settings(&content, instanceState, backup, params, rt); if (params->restore_as_replica) - print_standby_settings_common(fp, backup, params); - - if (fio_fflush(fp) != 0 || - fio_fclose(fp)) - elog(ERROR, "cannot write file 
\"%s\": %s", postgres_auto_path, - strerror(errno)); + print_standby_settings_common(&content, backup, params); /* * Create "recovery.signal" to mark this recovery as PITR for PostgreSQL. @@ -1662,15 +1623,11 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, elog(LOG, "creating recovery.signal file"); join_path_components(path, instance_config.pgdata, "recovery.signal"); - fp = fio_fopen(FIO_DB_HOST, path, PG_BINARY_W); - if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", path, - strerror(errno)); + err = $i(pioWriteFile, backup->database_location, .path = path, + .content = zero); - if (fio_fflush(fp) != 0 || - fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", path, - strerror(errno)); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "writting recovery.signal"); } if (params->restore_as_replica) @@ -1678,17 +1635,29 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, elog(LOG, "creating standby.signal file"); join_path_components(path, instance_config.pgdata, "standby.signal"); - fp = fio_fopen(FIO_DB_HOST, path, PG_BINARY_W); - if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", path, - strerror(errno)); + err = $i(pioWriteFile, backup->database_location, .path = path, + .content = zero); - if (fio_fflush(fp) != 0 || - fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", path, - strerror(errno)); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "writting standby.signal"); } } + + /* Write data to postgresql.auto.conf.tmp */ + err = $i(pioWriteFile, backup->database_location, + .path = postgres_auto_path_tmp, + .content = ft_str2bytes(ft_strbuf_ref(&content))); + ft_strbuf_free(&content); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "writting recovery options"); + + err = $i(pioRename, backup->database_location, + .old_path = postgres_auto_path_tmp, + .new_path = postgres_auto_path); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "renaming 
postgres.auto.conf file"); + + /* skip chmod, since pioWriteFile creates with FILE_PERMISSION */ } #endif diff --git a/src/utils/file.c b/src/utils/file.c index f6447aa66..e384d8f52 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -181,6 +181,13 @@ remove_file_or_dir(const char* path) #define remove_file_or_dir(path) remove(path) #endif +static void +fio_ensure_remote(void) +{ + if (!fio_stdin && !launch_agent()) + elog(ERROR, "Failed to establish SSH connection: %s", strerror(errno)); +} + /* Check if specified location is local for current node */ bool fio_is_remote(fio_location location) @@ -4953,6 +4960,8 @@ pioRemoteDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) fobj_reset_err(err); + fio_ensure_remote(); + fio_header hdr = { .cop = FIO_READ_FILE_AT_ONCE, .handle = -1, @@ -4993,6 +5002,8 @@ pioRemoteDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) fobj_reset_err(&err); + fio_ensure_remote(); + if (content.len > PIO_READ_WRITE_FILE_LIMIT) { err = $err(RT, "File content too large {path:q}: {size}", diff --git a/src/utils/file.h b/src/utils/file.h index 315dbb54b..5fd4f6b81 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -280,7 +280,7 @@ fobj_iface(pioReadCloser); (bool, backup_logs), (bool, skip_hidden), (int, external_dir_num) #define mth__pioRemoveDir void, (const char *, root), (bool, root_as_well) /* pioReadFile and pioWriteFile should be used only for small files */ -#define PIO_READ_WRITE_FILE_LIMIT (1024*1024) +#define PIO_READ_WRITE_FILE_LIMIT (16*1024*1024) #define mth__pioReadFile ft_bytes_t, (path_t, path), (bool, binary), \ (err_i *, err) #define mth__pioReadFile__optional() (binary, true) From 4b50e5db0bebc6556f6a360208bba0442538b130 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 29 Nov 2022 10:52:08 +0300 Subject: [PATCH 106/339] pioReadFile - do not add zero byte to text file output. it produces more problems, than solves. 
--- src/fu_util/ft_util.h | 10 ---------- src/fu_util/impl/ft_impl.h | 25 ------------------------- src/restore.c | 3 --- src/utils/file.c | 8 ++------ 4 files changed, 2 insertions(+), 44 deletions(-) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index 8f2beca54..20a03a3fb 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -370,14 +370,6 @@ ft_inline ft_bytes_t ft_str2bytes_withzb(ft_str_t str) { return ft_bytes(str.ptr, str.len+1); } -/* - * casts to string checking last byte is zero - * note: - * bytes != ft_str2bytes(ft_bytes2str(bytes)) - * bytes == ft_str2bytes_withzb(ft_bytes2str(bytes)) - */ -ft_inline ft_str_t ft_bytes2str(ft_bytes_t bytes); - ft_inline ft_str_t ft_strdup(ft_str_t str); ft_inline ft_str_t ft_strdupc(const char* str); /* use only if string was allocated */ @@ -392,9 +384,7 @@ ft_inline FT_CMP_RES ft_strcmp (ft_str_t str, ft_str_t oth); ft_inline bool ft_streqc (ft_str_t str, const char* oth); ft_inline FT_CMP_RES ft_strcmpc(ft_str_t str, const char* oth); -/* cuts first line from argument */ ft_inline void ft_str_consume(ft_str_t *str, size_t cut); -ft_inline ft_bytes_t ft_str_shift_line(ft_str_t *str); /* * String buffer. 
diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index d1f2fde72..a90b74bdc 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -387,12 +387,6 @@ ft_bytes_has_cstr(ft_bytes_t haystack, const char* needle) // String utils -ft_inline ft_str_t -ft_bytes2str(ft_bytes_t bytes) { - ft_dbg_assert(bytes.ptr[bytes.len-1] == '\0'); - return ft_str(bytes.ptr, bytes.len-1); -} - ft_inline char * ft_cstrdup(const char *str) { return (char*)ft_strdupc(str).ptr; @@ -449,25 +443,6 @@ ft_str_consume(ft_str_t *str, size_t cut) { str->len -= cut; } -ft_inline ft_bytes_t -ft_str_shift_line(ft_str_t *str) -{ - size_t i; - char *p = str->ptr; - - for (i = 0; i < str->len; i++) { - if (p[i] == '\r' || p[i] == '\n') { - if (p[i] == '\r' && p[i+1] == '\n') - i++; - ft_str_consume(str, i+1); - return ft_bytes(p, i+1); - } - } - - ft_str_consume(str, str->len); - return ft_bytes(p, i); -} - ft_inline ft_strbuf_t ft_strbuf_zero(void) { return (ft_strbuf_t){.ptr = "", .len = 0, .cap = 0}; diff --git a/src/restore.c b/src/restore.c index 5c6262895..31d2cfba5 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1571,9 +1571,6 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, elog(ERROR, "cannot open \"%s\": %s", postgres_auto_path_tmp, strerror(errno)); parse = old_content; /* copy since ft_bytes_shift_line mutates bytes */ - /* chomp zero byte */ - ft_dbg_assert(parse.ptr[parse.len-1] == 0); - parse.len -= 1; while (parse.len > 0) { diff --git a/src/utils/file.c b/src/utils/file.c index e384d8f52..20c1ab133 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -4472,13 +4472,9 @@ pioLocalDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) return res; } - if (binary) - res.len = amount; - else - { - res.len = amount + 1; + res.len = amount; + if (!binary) res.ptr[amount] = 0; - } $i(pioClose, fl); return res; From 9a929002085e3014df0ecb8d6df5f19a6273b7fa Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 29 Nov 
2022 12:04:36 +0300 Subject: [PATCH 107/339] [PBCKP-368] - pioClose sync --- src/utils/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/file.c b/src/utils/file.c index 20c1ab133..edf1a54f0 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -4524,7 +4524,7 @@ pioLocalDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) if ($haserr(err)) return $iresult(err); - err = $i(pioClose, fl); + err = $i(pioClose, fl, .sync = true); if ($haserr(err)) { $(pioRemove, self, .path = path); From 824b7d86b14b932db603a8e8f6ee5e907c3f8f16 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 29 Nov 2022 12:09:06 +0300 Subject: [PATCH 108/339] [PBCKP-368] use pioWriteFile in catalog.c --- src/catalog.c | 62 ++++++++------------------------------------------- src/show.c | 2 +- 2 files changed, 10 insertions(+), 54 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 64f96f252..b197ca482 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2431,71 +2431,27 @@ write_backup(pgBackup *backup, bool strict) { FOBJ_FUNC_ARP(); - pioFile_i out; + ft_str_t buf; char path_temp[MAXPGPATH]; char path[MAXPGPATH]; err_i err = $noerr(); - pioDrive_i backup_drive = pioDriveForLocation(FIO_BACKUP_HOST); - join_path_components(path, backup->root_dir, BACKUP_CONTROL_FILE); snprintf(path_temp, sizeof(path_temp), "%s.tmp", path); - out = $i(pioOpen, backup_drive, path_temp, - .flags = O_WRONLY | O_CREAT | O_EXCL | PG_BINARY, - .err = &err); - - if ($noerr(err)) - { - size_t length; - ft_str_t buf = pgBackupWriteControl(backup, true); - length = $i(pioWrite, out, ft_bytes(buf.ptr, buf.len), &err); + buf = pgBackupWriteControl(backup, true); + err = $i(pioWriteFile, backup->backup_location, .path = path_temp, + .content = ft_bytes(buf.ptr, buf.len), .binary = false); - ft_free((char*)buf.ptr); - - if (length != buf.len) - elog(ERROR, "Incorrect size of writing data"); - } - else - elog(ERROR, "Failed to open file \"%s\" ", path_temp); + 
ft_str_free(&buf); - err = $i(pioWriteFinish, out); if ($haserr(err)) - { - int elevel = ERROR; - int save_errno = errno; - - if (!strict && (errno == ENOSPC)) - elevel = WARNING; + ft_logerr(FT_FATAL, $errmsg(err), "Writting " BACKUP_CONTROL_FILE ".tmp"); - elog(elevel, "Cannot flush control file \"%s\": %s", - path_temp, strerror(save_errno)); - - if (!strict && (save_errno == ENOSPC)) - { - err = $i(pioClose, out); - $i(pioRemove, backup_drive, path_temp, false); - if ($haserr(err)) - elog(elevel, "Additionally cannot remove file \"%s\": %s", path_temp, strerror(errno)); - return; - } - } - - /* Ignore 'out of space' error in lax mode */ - err = $i(pioClose, out); + err = $i(pioRename, backup->backup_location, + .old_path = path_temp, .new_path = path); if ($haserr(err)) - { - elog(ERROR, "Cannot close control file \"%s\": %s", - path_temp, strerror(errno)); - } - - if (fio_sync(FIO_BACKUP_HOST, path_temp) < 0) - elog(ERROR, "Cannot sync control file \"%s\": %s", - path_temp, strerror(errno)); - - if (rename(path_temp, path) < 0) - elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", - path_temp, path, strerror(errno)); + ft_logerr(FT_FATAL, $errmsg(err), "Renaming " BACKUP_CONTROL_FILE); } /* diff --git a/src/show.c b/src/show.c index a2bc6b696..81ac86f68 100644 --- a/src/show.c +++ b/src/show.c @@ -523,7 +523,7 @@ show_backup(InstanceState *instanceState, time_t requested_backup_id) fwrite(buf.ptr, 1, buf.len, stdout); - ft_free((char*)buf.ptr); + ft_str_free(&buf); } else elog(ERROR, "Invalid show format %d", (int) show_format); From 83200596bb60dc4b2ef92f4eff8d0ac942f349f5 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 29 Nov 2022 12:25:44 +0300 Subject: [PATCH 109/339] [PBCKP-368] use pioWriteFile to write control file. 
--- src/util.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/src/util.c b/src/util.c index 05d3b4711..5e9699a2f 100644 --- a/src/util.c +++ b/src/util.c @@ -86,30 +86,25 @@ checkControlFile(ControlFileData *ControlFile) static void writeControlFile(fio_location location, const char *path, ControlFileData *ControlFile) { - int fd; char *buffer = NULL; + pioDrive_i drive; + err_i err; int ControlFileSize = PG_CONTROL_FILE_SIZE; + drive = pioDriveForLocation(location); + /* copy controlFileSize */ buffer = pg_malloc0(ControlFileSize); memcpy(buffer, ControlFile, sizeof(ControlFileData)); - /* Write pg_control */ - fd = fio_open(location, path, - O_RDWR | O_CREAT | O_TRUNC | PG_BINARY); - - if (fd < 0) - elog(ERROR, "Failed to open file: %s", path); + err = $i(pioWriteFile, drive, .path = path, + .content = ft_bytes(buffer, ControlFileSize)); - if (fio_write(fd, buffer, ControlFileSize) != ControlFileSize) - elog(ERROR, "Failed to overwrite file: %s", path); - - if (fio_flush(fd) != 0) - elog(ERROR, "Failed to sync file: %s", path); - - fio_close(fd); pg_free(buffer); + + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Writting control file"); } /* From b5283d1273e8763d28b83e035507a21ce507f301 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 29 Nov 2022 13:00:16 +0300 Subject: [PATCH 110/339] [PBCKP-368] use pioWriteFile for write_database_map --- src/dir.c | 33 ++++++++++++++++----------------- src/pg_probackup.h | 1 - 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/src/dir.c b/src/dir.c index 070499bb0..124e76b37 100644 --- a/src/dir.c +++ b/src/dir.c @@ -132,6 +132,8 @@ static void control_string_bad_format(const char* str); static bool exclude_files_cb(void *value, void *exclude_args); +static void print_database_map(ft_strbuf_t *buf, parray *database_list); + /* Tablespace mapping */ static TablespaceList tablespace_dirs = {NULL, NULL}; /* Extra directories mapping */ @@ -1424,8 +1426,8 @@ 
backup_contains_external(const char *dir, parray *dirs_list) /* * Print database_map */ -void -print_database_map(FILE *out, parray *database_map) +static void +print_database_map(ft_strbuf_t *buf, parray *database_map) { int i; @@ -1433,7 +1435,7 @@ print_database_map(FILE *out, parray *database_map) { db_map_entry *db_entry = (db_map_entry *) parray_get(database_map, i); - fio_fprintf(out, "{\"dbOid\":\"%u\", \"datname\":\"%s\"}\n", + ft_strbuf_catf(buf, "{\"dbOid\":\"%u\", \"datname\":\"%s\"}\n", db_entry->dbOid, db_entry->datname); } @@ -1446,29 +1448,26 @@ print_database_map(FILE *out, parray *database_map) void write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_list) { - FILE *fp; + ft_strbuf_t buf; pgFile *file; char database_dir[MAXPGPATH]; char database_map_path[MAXPGPATH]; pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); + err_i err; join_path_components(database_dir, backup->root_dir, DATABASE_DIR); join_path_components(database_map_path, database_dir, DATABASE_MAP); - fp = fio_fopen(FIO_BACKUP_HOST, database_map_path, PG_BINARY_W); - if (fp == NULL) - elog(ERROR, "Cannot open database map \"%s\": %s", database_map_path, - strerror(errno)); + buf = ft_strbuf_zero(); + print_database_map(&buf, database_map); - print_database_map(fp, database_map); - if (fio_fflush(fp) || fio_fclose(fp)) - { - int save_errno = errno; - if (fio_remove(FIO_BACKUP_HOST, database_map_path, false) != 0) - elog(WARNING, "Cannot cleanup database map \"%s\": %s", database_map_path, strerror(errno)); - elog(ERROR, "Cannot write database map \"%s\": %s", - database_map_path, strerror(save_errno)); - } + err = $i(pioWriteFile, drive, .path = database_map_path, + .content = ft_str2bytes(ft_strbuf_ref(&buf))); + + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Writting database map"); + + ft_strbuf_free(&buf); /* Add metadata to backup_content.control */ file = pgFileNew(database_map_path, DATABASE_MAP, true, 0, drive); diff --git 
a/src/pg_probackup.h b/src/pg_probackup.h index 4f20e91e6..5799bda0c 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1012,7 +1012,6 @@ extern int check_tablespace_mapping(pgBackup *backup, bool incremental, bool fo extern void check_external_dir_mapping(pgBackup *backup, bool incremental); extern char *get_external_remap(char *current_dir); -extern void print_database_map(FILE *out, parray *database_list); extern void write_database_map(pgBackup *backup, parray *database_list, parray *backup_file_list); extern void db_map_entry_free(void *map); From f12ff418d75cb91abfd986be828258fbb14c1501 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Tue, 29 Nov 2022 15:21:04 +0300 Subject: [PATCH 111/339] PBCKP-337 switch to pio in config_read_opt --- src/utils/configuration.c | 46 ++++++++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 4bfa2447f..5622532fc 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -544,6 +544,16 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[], return optind; } +static void +ft_bytes_strip_right(ft_bytes_t *line) +{ + size_t i; + + for (i = line->len; i > 0 && IsSpace(line->ptr[i - 1]); i--) + line->ptr[i - 1] = '\0'; + line->len = i; +} + /* * Get configuration from configuration file. * Return number of parsed options. 
@@ -552,32 +562,41 @@ int config_read_opt(const char *path, ConfigOption options[], int elevel, bool strict, bool missing_ok) { - FILE *fp; - char buf[4096]; + pioDrive_i local_drive = pioDriveForLocation(FIO_BACKUP_HOST); char key[1024]; char value[2048]; int parsed_options = 0; + err_i err = $noerr(); + ft_bytes_t config_file, to_free; if (!options) return parsed_options; - if ((fp = fio_open_stream(FIO_BACKUP_HOST, path)) == NULL) + config_file = $i(pioReadFile, local_drive, .path = path, .binary = false, + .err = &err); + if ($haserr(err)) { - if (missing_ok && errno == ENOENT) - return parsed_options; + ft_bytes_free(&config_file); - elog(ERROR, "could not open file \"%s\": %s", - path, strerror(errno)); + if (missing_ok && getErrno(err) == ENOENT) + return parsed_options; + + ft_logerr(FT_FATAL, $errmsg(err), "could not read file"); + return parsed_options; } + to_free = config_file; - while (fgets(buf, lengthof(buf), fp)) + while (true) { size_t i; + ft_bytes_t line = ft_bytes_shift_line(&config_file); - for (i = strlen(buf); i > 0 && IsSpace(buf[i - 1]); i--) - buf[i - 1] = '\0'; + if (line.len == 0) + break; - if (parse_pair(buf, key, value)) + ft_bytes_strip_right(&line); + + if (parse_pair(line.ptr, key, value)) { for (i = 0; options[i].type; i++) { @@ -602,10 +621,7 @@ config_read_opt(const char *path, ConfigOption options[], int elevel, } } - if (ferror(fp)) - elog(ERROR, "Failed to read from file: \"%s\"", path); - - fio_close_stream(fp); + ft_bytes_free(&to_free); return parsed_options; } From 1dc40334760e9bd285dde41a3cbcfe12a9dd4567 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Tue, 29 Nov 2022 16:36:35 +0300 Subject: [PATCH 112/339] Added include catalog/catalog.h to file.c for PG10 --- src/dir.c | 4 ---- src/utils/file.c | 3 +++ 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/dir.c b/src/dir.c index 124e76b37..469967599 100644 --- a/src/dir.c +++ b/src/dir.c @@ -13,10 +13,6 @@ #include "utils/file.h" -#if PG_VERSION_NUM < 
110000 -#include "catalog/catalog.h" -#endif - #include #include diff --git a/src/utils/file.c b/src/utils/file.c index edf1a54f0..7519d2f5d 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -6,6 +6,9 @@ #include "file.h" #include "catalog/pg_tablespace.h" +#if PG_VERSION_NUM < 110000 +#include "catalog/catalog.h" +#endif #include "storage/checksum.h" #define PRINTF_BUF_SIZE 1024 From a21b4c04866484d374b1d34ca7924496b33c7ebf Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Wed, 23 Nov 2022 02:05:45 +0300 Subject: [PATCH 113/339] [PBCKP-279] Removed basebackup sources dependency. --- Makefile | 23 +- src/backup.c | 2 - src/catchup.c | 2 - src/compatibility/file_compat.c | 152 ++++ src/compatibility/file_compat.h | 32 + src/compatibility/file_compat10.c | 84 ++ src/compatibility/file_compat10.h | 172 ++++ src/compatibility/logging.h | 37 + src/compatibility/receivelog.c | 1288 +++++++++++++++++++++++++++++ src/compatibility/receivelog.h | 57 ++ src/compatibility/streamutil.c | 281 +++++++ src/compatibility/streamutil.h | 36 + src/compatibility/walmethods.c | 467 +++++++++++ src/compatibility/walmethods.h | 101 +++ src/pg_probackup.c | 10 - src/pg_probackup.h | 26 +- src/stream.c | 27 +- src/utils/pgut.c | 16 +- src/utils/simple_prompt.c | 159 ++++ src/utils/simple_prompt.h | 7 + 20 files changed, 2889 insertions(+), 90 deletions(-) create mode 100644 src/compatibility/file_compat.c create mode 100644 src/compatibility/file_compat.h create mode 100644 src/compatibility/file_compat10.c create mode 100644 src/compatibility/file_compat10.h create mode 100644 src/compatibility/logging.h create mode 100644 src/compatibility/receivelog.c create mode 100644 src/compatibility/receivelog.h create mode 100644 src/compatibility/streamutil.c create mode 100644 src/compatibility/streamutil.h create mode 100644 src/compatibility/walmethods.c create mode 100644 src/compatibility/walmethods.h create mode 100644 src/utils/simple_prompt.c create mode 100644 
src/utils/simple_prompt.h diff --git a/Makefile b/Makefile index 3c8cb2c15..16b645c69 100644 --- a/Makefile +++ b/Makefile @@ -37,18 +37,15 @@ OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o s src/delete.o src/dir.o src/fetch.o src/help.o src/init.o src/merge.o \ src/parsexlog.o src/ptrack.o src/pg_probackup.o src/restore.o src/show.o src/stream.o \ src/util.o src/validate.o src/datapagemap.o src/catchup.o \ - src/compatibility/pg-11.o + src/compatibility/pg-11.o src/utils/simple_prompt.o +OBJS += src/compatibility/file_compat.o src/compatibility/receivelog.o \ + src/compatibility/streamutil.o \ + src/compatibility/walmethods.o src/compatibility/file_compat10.o # sources borrowed from postgresql (paths are relative to pg top dir) -BORROWED_H_SRC := \ - src/bin/pg_basebackup/receivelog.h \ - src/bin/pg_basebackup/streamutil.h \ - src/bin/pg_basebackup/walmethods.h +BORROWED_H_SRC := BORROWED_C_SRC := \ - src/backend/access/transam/xlogreader.c \ - src/bin/pg_basebackup/receivelog.c \ - src/bin/pg_basebackup/streamutil.c \ - src/bin/pg_basebackup/walmethods.c + src/backend/access/transam/xlogreader.c BORROW_DIR := src/borrowed BORROWED_H := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_H_SRC))) @@ -77,7 +74,7 @@ include $(top_srcdir)/contrib/contrib-global.mk endif # -PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -I$(top_pbk_srcdir)src -I$(BORROW_DIR) +PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -I$(top_pbk_srcdir)src -I$(BORROW_DIR) -Isrc/compatibility -Isrc/utils PG_CPPFLAGS += -I$(top_pbk_srcdir)src/fu_util -Wno-declaration-after-statement ifdef VPATH PG_CPPFLAGS += -Isrc @@ -86,9 +83,9 @@ override CPPFLAGS := -DFRONTEND $(CPPFLAGS) $(PG_CPPFLAGS) PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} # additional dependencies on borrowed files -src/backup.o src/catchup.o src/pg_probackup.o: $(BORROW_DIR)/streamutil.h -src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o $(BORROW_DIR)/walmethods.o: 
$(BORROW_DIR)/receivelog.h -$(BORROW_DIR)/receivelog.h: $(BORROW_DIR)/walmethods.h +src/backup.o src/catchup.o src/pg_probackup.o: src/compatibility/streamutil.h +src/stream.o src/compatibility/receivelog.o src/compatibility/streamutil.o src/compatibility/walmethods.o: src/compatibility/receivelog.h +src/compatibility/receivelog.h: src/compatibility/walmethods.h # generate separate makefile to handle borrowed files borrowed.mk: $(firstword $(MAKEFILE_LIST)) diff --git a/src/backup.c b/src/backup.c index 501aba2a7..44f1905bd 100644 --- a/src/backup.c +++ b/src/backup.c @@ -812,10 +812,8 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, check_system_identifiers(backup_conn, instance_config.pgdata); /* below perform checks specific for backup command */ -#if PG_VERSION_NUM >= 110000 if (!RetrieveWalSegSize(backup_conn)) elog(ERROR, "Failed to retrieve wal_segment_size"); -#endif get_ptrack_version(backup_conn, &nodeInfo); // elog(WARNING, "ptrack_version_num %d", ptrack_version_num); diff --git a/src/catchup.c b/src/catchup.c index 5bb82ae77..8ad187606 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -55,10 +55,8 @@ catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, cons /* Do some compatibility checks and fill basic info about PG instance */ source_conn = pgdata_basic_setup(instance_config.conn_opt, source_node_info); -#if PG_VERSION_NUM >= 110000 if (!RetrieveWalSegSize(source_conn)) elog(ERROR, "Failed to retrieve wal_segment_size"); -#endif get_ptrack_version(source_conn, source_node_info); if (source_node_info->ptrack_version_num > 0) diff --git a/src/compatibility/file_compat.c b/src/compatibility/file_compat.c new file mode 100644 index 000000000..d465c53a1 --- /dev/null +++ b/src/compatibility/file_compat.c @@ -0,0 +1,152 @@ +#include +#include +#include + +#include "logging.h" +#include "file_compat.h" +/*vvs*/ +/* + * fsync_fname -- Try to fsync a file or directory + * + * Ignores errors trying to open 
unreadable files, or trying to fsync + * directories on systems where that isn't allowed/required. All other errors + * are fatal. + */ +int +fsync_fname_compat(const char* fname, bool isdir) +{ + int fd; + int flags; + int returncode; + + /* + * Some OSs require directories to be opened read-only whereas other + * systems don't allow us to fsync files opened read-only; so we need both + * cases here. Using O_RDWR will cause us to fail to fsync files that are + * not writable by our userid, but we assume that's OK. + */ + flags = PG_BINARY; + if (!isdir) + flags |= O_RDWR; + else + flags |= O_RDONLY; + + /* + * Open the file, silently ignoring errors about unreadable files (or + * unsupported operations, e.g. opening a directory under Windows), and + * logging others. + */ + fd = open(fname, flags, 0); + if (fd < 0) + { + if (errno == EACCES || (isdir && errno == EISDIR)) + return 0; + pg_log_error("could not open file \"%s\": %m", fname); + return -1; + } + + returncode = fsync(fd); + + /* + * Some OSes don't allow us to fsync directories at all, so we can ignore + * those errors. Anything else needs to be reported. + */ + if (returncode != 0 && !(isdir && (errno == EBADF || errno == EINVAL))) + { + pg_log_fatal("could not fsync file \"%s\": %m", fname); + (void)close(fd); + exit(EXIT_FAILURE); + } + + (void)close(fd); + return 0; +} + +/* + * fsync_parent_path -- fsync the parent path of a file or directory + * + * This is aimed at making file operations persistent on disk in case of + * an OS crash or power failure. + */ +int +fsync_parent_path_compat(const char* fname) +{ + char parentpath[MAXPGPATH]; + + strlcpy(parentpath, fname, MAXPGPATH); + get_parent_directory(parentpath); + + /* + * get_parent_directory() returns an empty string if the input argument is + * just a file name (see comments in path.c), so handle that as being the + * current directory. 
+ */ + if (strlen(parentpath) == 0) + strlcpy(parentpath, ".", MAXPGPATH); + + if (fsync_fname_compat(parentpath, true) != 0) + return -1; + + return 0; +} + +/* + * durable_rename -- rename(2) wrapper, issuing fsyncs required for durability + * + * Wrapper around rename, similar to the backend version. + */ +int +durable_rename_compat(const char* oldfile, const char* newfile) +{ + int fd; + + /* + * First fsync the old and target path (if it exists), to ensure that they + * are properly persistent on disk. Syncing the target file is not + * strictly necessary, but it makes it easier to reason about crashes; + * because it's then guaranteed that either source or target file exists + * after a crash. + */ + if (fsync_fname_compat(oldfile, false) != 0) + return -1; + + fd = open(newfile, PG_BINARY | O_RDWR, 0); + if (fd < 0) + { + if (errno != ENOENT) + { + pg_log_error("could not open file \"%s\": %m", newfile); + return -1; + } + } + else + { + if (fsync(fd) != 0) + { + pg_log_fatal("could not fsync file \"%s\": %m", newfile); + close(fd); + exit(EXIT_FAILURE); + } + close(fd); + } + + /* Time to do the real deal... */ + if (rename(oldfile, newfile) != 0) + { + pg_log_error("could not rename file \"%s\" to \"%s\": %m", + oldfile, newfile); + return -1; + } + + /* + * To guarantee renaming the file is persistent, fsync the file with its + * new name, and its containing directory. 
+ */ + if (fsync_fname_compat(newfile, false) != 0) + return -1; + + if (fsync_parent_path_compat(newfile) != 0) + return -1; + + return 0; +} diff --git a/src/compatibility/file_compat.h b/src/compatibility/file_compat.h new file mode 100644 index 000000000..1000e8e92 --- /dev/null +++ b/src/compatibility/file_compat.h @@ -0,0 +1,32 @@ +#ifndef FILE_COMPAT_H +#define FILE_COMPAT_H + +#include +#include "datatype/timestamp.h" + + +#if PG_VERSION_NUM >= 120000 +#include "common/logging.h" +#else +#include "logging.h" +#endif + + +extern int fsync_parent_path_compat(const char* fname); +extern int fsync_fname_compat(const char* fname, bool isdir); +extern int durable_rename_compat(const char* oldfile, const char* newfile); + + +#if PG_VERSION_NUM < 110000 +#include "file_compat10.h" +#else +#include "common/file_perm.h" +#include "access/xlog_internal.h" +#endif + + + +#endif /* FILE_COMPAT_H */ + + + diff --git a/src/compatibility/file_compat10.c b/src/compatibility/file_compat10.c new file mode 100644 index 000000000..e125bf13d --- /dev/null +++ b/src/compatibility/file_compat10.c @@ -0,0 +1,84 @@ +#include + +#if PG_VERSION_NUM < 110000 + +#include +#include +#include "logging.h" + +#include "file_compat.h" + +/* Modes for creating directories and files in the data directory */ +int pg_dir_create_mode = PG_DIR_MODE_OWNER; +int pg_file_create_mode = PG_FILE_MODE_OWNER; +/* + * Mode mask to pass to umask(). This is more of a preventative measure since + * all file/directory creates should be performed using the create modes above. + */ +int pg_mode_mask = PG_MODE_MASK_OWNER; + + +/* + * Set create modes and mask to use when writing to PGDATA based on the data + * directory mode passed. If group read/execute are present in the mode, then + * create modes and mask will be relaxed to allow group read/execute on all + * newly created files and directories. 
+ */ +void +SetDataDirectoryCreatePerm(int dataDirMode) +{ + /* If the data directory mode has group access */ + if ((PG_DIR_MODE_GROUP & dataDirMode) == PG_DIR_MODE_GROUP) + { + pg_dir_create_mode = PG_DIR_MODE_GROUP; + pg_file_create_mode = PG_FILE_MODE_GROUP; + pg_mode_mask = PG_MODE_MASK_GROUP; + } + /* Else use default permissions */ + else + { + pg_dir_create_mode = PG_DIR_MODE_OWNER; + pg_file_create_mode = PG_FILE_MODE_OWNER; + pg_mode_mask = PG_MODE_MASK_OWNER; + } +} + + + +/* + * Get the create modes and mask to use when writing to PGDATA by examining the + * mode of the PGDATA directory and calling SetDataDirectoryCreatePerm(). + * + * Errors are not handled here and should be reported by the application when + * false is returned. + * + * Suppress when on Windows, because there may not be proper support for Unix-y + * file permissions. + */ +bool +GetDataDirectoryCreatePerm(const char* dataDir) +{ +#if !defined(WIN32) && !defined(__CYGWIN__) + struct stat statBuf; + + /* + * If an error occurs getting the mode then return false. The caller is + * responsible for generating an error, if appropriate, indicating that we + * were unable to access the data directory. + */ + if (stat(dataDir, &statBuf) == -1) + return false; + + /* Set permissions */ + SetDataDirectoryCreatePerm(statBuf.st_mode); + return true; +#else /* !defined(WIN32) && !defined(__CYGWIN__) */ + /* + * On Windows, we don't have anything to do here since they don't have + * Unix-y permissions. 
+ */ + return true; +#endif +} + +#endif diff --git a/src/compatibility/file_compat10.h b/src/compatibility/file_compat10.h new file mode 100644 index 000000000..c40dda133 --- /dev/null +++ b/src/compatibility/file_compat10.h @@ -0,0 +1,172 @@ +#ifndef FILE_COMPAT10_H +#define FILE_COMPAT10_H + +//for PG10 + +//#include "pg_bswap.h" + + +#ifndef DEFAULT_XLOG_SEG_SIZE +#define DEFAULT_XLOG_SEG_SIZE (16*1024*1024) +#endif + + +#ifndef PG_FILE_MODE_OWNER + +/* + * Mode mask for data directory permissions that only allows the owner to + * read/write directories and files. + * + * This is the default. + */ +#define PG_MODE_MASK_OWNER (S_IRWXG | S_IRWXO) + + /* + * Mode mask for data directory permissions that also allows group read/execute. + */ +#define PG_MODE_MASK_GROUP (S_IWGRP | S_IRWXO) + + + +#define PG_FILE_MODE_OWNER (S_IRUSR | S_IWUSR) +//#define pg_file_create_mode PG_FILE_MODE_OWNER + +/* Default mode for creating directories */ +#define PG_DIR_MODE_OWNER S_IRWXU + +/* Mode for creating directories that allows group read/execute */ +#define PG_DIR_MODE_GROUP (S_IRWXU | S_IRGRP | S_IXGRP) + +/* Default mode for creating files */ +#define PG_FILE_MODE_OWNER (S_IRUSR | S_IWUSR) + +/* Mode for creating files that allows group read */ +#define PG_FILE_MODE_GROUP (S_IRUSR | S_IWUSR | S_IRGRP) + +/* Modes for creating directories and files in the data directory */ +extern int pg_dir_create_mode; +extern int pg_file_create_mode; + +/* Mode mask to pass to umask() */ +extern int pg_mode_mask; + +/* Set permissions and mask based on the provided mode */ +extern void SetDataDirectoryCreatePerm(int dataDirMode); + +/* Set permissions and mask based on the mode of the data directory */ +extern bool GetDataDirectoryCreatePerm(const char* dataDir); + +#endif + +/* Set permissions and mask based on the provided mode */ +extern void SetDataDirectoryCreatePerm(int dataDirMode); + +/* Set permissions and mask based on the mode of the data directory */ +extern bool 
GetDataDirectoryCreatePerm(const char *dataDir); + + +/* wal_segment_size can range from 1MB to 1GB */ +#define WalSegMinSize 1024 * 1024 +#define WalSegMaxSize 1024 * 1024 * 1024 + + +#define XLogSegmentOffset(xlogptr, wal_segsz_bytes) \ + ((xlogptr) & ((wal_segsz_bytes) - 1)) + +/* check that the given size is a valid wal_segment_size */ +#define IsPowerOf2(x) (x > 0 && ((x) & ((x)-1)) == 0) + +#define IsValidWalSegSize(size) \ + (IsPowerOf2(size) && \ + ((size) >= WalSegMinSize && (size) <= WalSegMaxSize)) + + + +/* From access/xlog_internal.h */ + +#undef XLByteToSeg +#undef XLogFileName +#undef XLogSegmentsPerXLogId +#undef XLogFromFileName +#undef XLogSegNoOffsetToRecPtr +#undef XLByteInSeg + +#define XLByteToSeg(xlrp, logSegNo, wal_segsz_bytes) \ + logSegNo = (xlrp) / (wal_segsz_bytes) + +#define XLogFileName(fname, tli, logSegNo, wal_segsz_bytes) \ + snprintf(fname, MAXFNAMELEN, "%08X%08X%08X", tli, \ + (uint32) ((logSegNo) / XLogSegmentsPerXLogId(wal_segsz_bytes)), \ + (uint32) ((logSegNo) % XLogSegmentsPerXLogId(wal_segsz_bytes))) + +#define XLogSegmentsPerXLogId(wal_segsz_bytes) \ + (UINT64CONST(0x100000000) / (wal_segsz_bytes)) + + +/* + * The XLog directory and control file (relative to $PGDATA) + */ +#define XLOGDIR "pg_wal" +#define XLOG_CONTROL_FILE "global/pg_control" + +/* + * These macros encapsulate knowledge about the exact layout of XLog file + * names, timeline history file names, and archive-status file names. + */ +#define MAXFNAMELEN 64 + +/* Length of XLog file name */ +#define XLOG_FNAME_LEN 24 + +/* + * Generate a WAL segment file name. Do not use this macro in a helper + * function allocating the result generated. 
+ */ +#define XLogFileName(fname, tli, logSegNo, wal_segsz_bytes) \ + snprintf(fname, MAXFNAMELEN, "%08X%08X%08X", tli, \ + (uint32) ((logSegNo) / XLogSegmentsPerXLogId(wal_segsz_bytes)), \ + (uint32) ((logSegNo) % XLogSegmentsPerXLogId(wal_segsz_bytes))) + +#define XLogFileNameById(fname, tli, log, seg) \ + snprintf(fname, MAXFNAMELEN, "%08X%08X%08X", tli, log, seg) + +#define IsXLogFileName(fname) \ + (strlen(fname) == XLOG_FNAME_LEN && \ + strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN) + + +#define XLogFromFileName(fname, tli, logSegNo, wal_segsz_bytes) \ + do { \ + uint32 log; \ + uint32 seg; \ + sscanf(fname, "%08X%08X%08X", tli, &log, &seg); \ + *logSegNo = (uint64) log * XLogSegmentsPerXLogId(wal_segsz_bytes) + seg; \ + } while (0) + +#define XLogSegNoOffsetToRecPtr(segno, offset, wal_segsz_bytes, dest) \ + (dest) = (segno) * (wal_segsz_bytes) + (offset) +/* + * Is an XLogRecPtr within a particular XLOG segment? + * + * For XLByteInSeg, do the computation at face value. For XLByteInPrevSeg, + * a boundary byte is taken to be in the previous segment. 
+ */ +#define XLByteInSeg(xlrp, logSegNo, wal_segsz_bytes) \ + (((xlrp) / (wal_segsz_bytes)) == (logSegNo)) + + + +/* logs restore point */ +/* +typedef struct xl_restore_point +{ + TimestampTz rp_time; + char rp_name[MAXFNAMELEN]; +} xl_restore_point; +*/ + + +#endif /* FILE_COMPAT10_H */ + + + diff --git a/src/compatibility/logging.h b/src/compatibility/logging.h new file mode 100644 index 000000000..4fbf427dc --- /dev/null +++ b/src/compatibility/logging.h @@ -0,0 +1,37 @@ +/*------------------------------------------------------------------------- + * Logging framework for frontend programs + * + * Copyright (c) 2018-2021, PostgreSQL Global Development Group + * Portions Copyright (c) 2021-2022, Postgres Professional + * + * src/include/common/logging.h + * + *------------------------------------------------------------------------- + */ +#ifndef COMMON_LOGGING_COMPAT_H +#define COMMON_LOGGING_COMPAT_H + +#if PG_VERSION_NUM >= 120000 && PG_VERSION_NUM < 150000 +#include "common/logging.h" + + +#else + +#include +#include "logger.h" + + +#define pg_log_fatal(...) elog(ERROR, __VA_ARGS__); + +#if PG_VERSION_NUM < 150000 +#define pg_log_error(...) elog(ERROR, __VA_ARGS__); +#define pg_log_warning(...) elog(WARNING, __VA_ARGS__); +#define pg_log_info(...) elog(INFO, __VA_ARGS__); +#endif + +#endif + +#endif /* COMMON_LOGGING_COMPAT_H */ + + + diff --git a/src/compatibility/receivelog.c b/src/compatibility/receivelog.c new file mode 100644 index 000000000..5b32ae7bb --- /dev/null +++ b/src/compatibility/receivelog.c @@ -0,0 +1,1288 @@ +/*------------------------------------------------------------------------- + * + * receivelog.c - receive WAL files using the streaming + * replication protocol. 
+ * + * Author: Magnus Hagander + * + * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/bin/pg_basebackup/receivelog.c + *------------------------------------------------------------------------- + */ + +#include "pg_probackup.h" +#include "postgres_fe.h" + +#include +#include +#ifdef HAVE_SYS_SELECT_H +#include +#endif + +#include "common/file_utils.h" +#include "logging.h" +#include "libpq-fe.h" +#include "receivelog.h" +#include "streamutil.h" + +#include "file_compat.h" + + + /* + * Handy macro for printing XLogRecPtr in conventional format, e.g., + * + * printf("%X/%X", LSN_FORMAT_ARGS(lsn)); + */ +#ifndef LSN_FORMAT_ARGS +#define LSN_FORMAT_ARGS(lsn) (AssertVariableIsOfTypeMacro((lsn), XLogRecPtr), (uint32) ((lsn) >> 32)), ((uint32) (lsn)) +#endif + +/* fd and filename for currently open WAL file */ +static Walfile *walfile = NULL; +static char current_walfile_name[MAXPGPATH] = ""; +static bool reportFlushPosition = false; +static XLogRecPtr lastFlushPosition = InvalidXLogRecPtr; + +static bool still_sending = true; /* feedback still needs to be sent? 
*/ + +static PGresult *HandleCopyStream(PGconn *conn, StreamCtl *stream, + XLogRecPtr *stoppos); +static int CopyStreamPoll(PGconn *conn, long timeout_ms, pgsocket stop_socket); +static int CopyStreamReceive(PGconn *conn, long timeout, pgsocket stop_socket, + char **buffer); +static bool ProcessKeepaliveMsg(PGconn *conn, StreamCtl *stream, char *copybuf, + int len, XLogRecPtr blockpos, TimestampTz *last_status); +static bool ProcessXLogDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, + XLogRecPtr *blockpos); +static PGresult *HandleEndOfCopyStream(PGconn *conn, StreamCtl *stream, char *copybuf, + XLogRecPtr blockpos, XLogRecPtr *stoppos); +static bool CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos); +static long CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout, + TimestampTz last_status); + +static bool ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos, + uint32 *timeline); + +static bool +mark_file_as_archived(StreamCtl *stream, const char *fname) +{ + Walfile *f; + static char tmppath[MAXPGPATH]; + + snprintf(tmppath, sizeof(tmppath), "archive_status/%s.done", + fname); + + f = stream->walmethod->open_for_write(tmppath, NULL, 0); + if (f == NULL) + { + elog(ERROR, "could not create archive status file \"%s\": %s", + tmppath, stream->walmethod->getlasterror()); + return false; + } + + if (stream->walmethod->close(f, CLOSE_NORMAL) != 0) + { + elog(ERROR, "could not close archive status file \"%s\": %s", + tmppath, stream->walmethod->getlasterror()); + return false; + } + + return true; +} + +/* + * Open a new WAL file in the specified directory. + * + * Returns true if OK; on failure, returns false after printing an error msg. + * On success, 'walfile' is set to the FD for the file, and the base filename + * (without partial_suffix) is stored in 'current_walfile_name'. + * + * The file will be padded to 16Mb with zeroes. 
+ */ +static bool +open_walfile(StreamCtl *stream, XLogRecPtr startpoint) +{ + Walfile *f; + char *fn; + ssize_t size; + XLogSegNo segno; + + XLByteToSeg(startpoint, segno, WalSegSz); + XLogFileName(current_walfile_name, stream->timeline, segno, WalSegSz); + + /* Note that this considers the compression used if necessary */ + fn = stream->walmethod->get_file_name(current_walfile_name, + stream->partial_suffix); + + /* + * When streaming to files, if an existing file exists we verify that it's + * either empty (just created), or a complete WalSegSz segment (in which + * case it has been created and padded). Anything else indicates a corrupt + * file. Compressed files have no need for padding, so just ignore this + * case. + * + * When streaming to tar, no file with this name will exist before, so we + * never have to verify a size. + */ + if (stream->walmethod->compression() == 0 && + stream->walmethod->existsfile(fn)) + { + size = stream->walmethod->get_file_size(fn); + if (size < 0) + { + elog(ERROR, "could not get size of write-ahead log file \"%s\": %s", + fn, stream->walmethod->getlasterror()); + pg_free(fn); + return false; + } + if (size == WalSegSz) + { + /* Already padded file. 
Open it for use */ + f = stream->walmethod->open_for_write(current_walfile_name, stream->partial_suffix, 0); + if (f == NULL) + { + elog(ERROR, "could not open existing write-ahead log file \"%s\": %s", + fn, stream->walmethod->getlasterror()); + pg_free(fn); + return false; + } + + /* fsync file in case of a previous crash */ + if (stream->walmethod->sync(f) != 0) + { + elog(ERROR, "could not fsync existing write-ahead log file \"%s\": %s", + fn, stream->walmethod->getlasterror());//FATAL + stream->walmethod->close(f, CLOSE_UNLINK); + exit(1); + } + + walfile = f; + pg_free(fn); + return true; + } + if (size != 0) + { + /* if write didn't set errno, assume problem is no disk space */ + if (errno == 0) + errno = ENOSPC; + elog(ERROR, ngettext("write-ahead log file \"%s\" has %d byte, should be 0 or %d", + "write-ahead log file \"%s\" has %d bytes, should be 0 or %d", + size), + fn, (int) size, WalSegSz); + pg_free(fn); + return false; + } + /* File existed and was empty, so fall through and open */ + } + + /* No file existed, so create one */ + + f = stream->walmethod->open_for_write(current_walfile_name, + stream->partial_suffix, WalSegSz); + if (f == NULL) + { + elog(ERROR, "could not open write-ahead log file \"%s\": %s", + fn, stream->walmethod->getlasterror()); + pg_free(fn); + return false; + } + + pg_free(fn); + walfile = f; + return true; +} + +/* + * Close the current WAL file (if open), and rename it to the correct + * filename if it's complete. On failure, prints an error message to stderr + * and returns false, otherwise returns true. 
+ */ +static bool +close_walfile(StreamCtl *stream, XLogRecPtr pos) +{ + off_t currpos; + int r; + + if (walfile == NULL) + return true; + + currpos = stream->walmethod->get_current_pos(walfile); + if (currpos == -1) + { + elog(ERROR, "could not determine seek position in file \"%s\": %s", + current_walfile_name, stream->walmethod->getlasterror()); + stream->walmethod->close(walfile, CLOSE_UNLINK); + walfile = NULL; + + return false; + } + + if (stream->partial_suffix) + { + if (currpos == WalSegSz) + r = stream->walmethod->close(walfile, CLOSE_NORMAL); + else + { + elog(INFO, "not renaming \"%s%s\", segment is not complete", + current_walfile_name, stream->partial_suffix); + r = stream->walmethod->close(walfile, CLOSE_NO_RENAME); + } + } + else + r = stream->walmethod->close(walfile, CLOSE_NORMAL); + + walfile = NULL; + + if (r != 0) + { + elog(ERROR, "could not close file \"%s\": %s", + current_walfile_name, stream->walmethod->getlasterror()); + return false; + } + + /* + * Mark file as archived if requested by the caller - pg_basebackup needs + * to do so as files can otherwise get archived again after promotion of a + * new node. This is in line with walreceiver.c always doing a + * XLogArchiveForceDone() after a complete segment. + */ + if (currpos == WalSegSz && stream->mark_done) + { + /* writes error message if failed */ + if (!mark_file_as_archived(stream, current_walfile_name)) + return false; + } + + lastFlushPosition = pos; + return true; +} + + +/* + * Check if a timeline history file exists. + */ +static bool +existsTimeLineHistoryFile(StreamCtl *stream) +{ + char histfname[MAXFNAMELEN]; + + /* + * Timeline 1 never has a history file. We treat that as if it existed, + * since we never need to stream it. 
+ */ + if (stream->timeline == 1) + return true; + + TLHistoryFileName(histfname, stream->timeline); + + return stream->walmethod->existsfile(histfname); +} + +static bool +writeTimeLineHistoryFile(StreamCtl *stream, char *filename, char *content) +{ + int size = strlen(content); + char histfname[MAXFNAMELEN]; + Walfile *f; + + /* + * Check that the server's idea of how timeline history files should be + * named matches ours. + */ + TLHistoryFileName(histfname, stream->timeline); + if (strcmp(histfname, filename) != 0) + { + pg_log_error("server reported unexpected history file name for timeline %u: %s", + stream->timeline, filename); + return false; + } + + f = stream->walmethod->open_for_write(histfname, ".tmp", 0); + if (f == NULL) + { + pg_log_error("could not create timeline history file \"%s\": %s", + histfname, stream->walmethod->getlasterror()); + return false; + } + + if ((int) stream->walmethod->write(f, content, size) != size) + { + pg_log_error("could not write timeline history file \"%s\": %s", + histfname, stream->walmethod->getlasterror()); + + /* + * If we fail to make the file, delete it to release disk space + */ + stream->walmethod->close(f, CLOSE_UNLINK); + + return false; + } + + if (stream->walmethod->close(f, CLOSE_NORMAL) != 0) + { + pg_log_error("could not close file \"%s\": %s", + histfname, stream->walmethod->getlasterror()); + return false; + } + + /* Maintain archive_status, check close_walfile() for details. */ + if (stream->mark_done) + { + /* writes error message if failed */ + if (!mark_file_as_archived(stream, histfname)) + return false; + } + + return true; +} + +/* + * Send a Standby Status Update message to server. 
+ */ +static bool +sendFeedback(PGconn *conn, XLogRecPtr blockpos, TimestampTz now, bool replyRequested) +{ + char replybuf[1 + 8 + 8 + 8 + 8 + 1]; + int len = 0; + + replybuf[len] = 'r'; + len += 1; + fe_sendint64(blockpos, &replybuf[len]); /* write */ + len += 8; + if (reportFlushPosition) + fe_sendint64(lastFlushPosition, &replybuf[len]); /* flush */ + else + fe_sendint64(InvalidXLogRecPtr, &replybuf[len]); /* flush */ + len += 8; + fe_sendint64(InvalidXLogRecPtr, &replybuf[len]); /* apply */ + len += 8; + fe_sendint64(now, &replybuf[len]); /* sendTime */ + len += 8; + replybuf[len] = replyRequested ? 1 : 0; /* replyRequested */ + len += 1; + + if (PQputCopyData(conn, replybuf, len) <= 0 || PQflush(conn)) + { + pg_log_error("could not send feedback packet: %s", + PQerrorMessage(conn)); + return false; + } + + return true; +} + +/* + * Check that the server version we're connected to is supported by + * ReceiveXlogStream(). + * + * If it's not, an error message is printed to stderr, and false is returned. + */ +bool +CheckServerVersionForStreaming(PGconn *conn) +{ + int minServerMajor, + maxServerMajor; + int serverMajor; + + /* + * The message format used in streaming replication changed in 9.3, so we + * cannot stream from older servers. And we don't support servers newer + * than the client; it might work, but we don't know, so err on the safe + * side. + */ + minServerMajor = 903; + maxServerMajor = PG_VERSION_NUM / 100; + serverMajor = PQserverVersion(conn) / 100; + if (serverMajor < minServerMajor) + { + const char *serverver = PQparameterStatus(conn, "server_version"); + + pg_log_error("incompatible server version %s; client does not support streaming from server versions older than %s", + serverver ? 
serverver : "'unknown'", + "9.3"); + return false; + } + else if (serverMajor > maxServerMajor) + { + const char *serverver = PQparameterStatus(conn, "server_version"); + + pg_log_error("incompatible server version %s; client does not support streaming from server versions newer than %s", + serverver ? serverver : "'unknown'", + PG_VERSION); + return false; + } + return true; +} + +/* + * Receive a log stream starting at the specified position. + * + * Individual parameters are passed through the StreamCtl structure. + * + * If sysidentifier is specified, validate that both the system + * identifier and the timeline matches the specified ones + * (by sending an extra IDENTIFY_SYSTEM command) + * + * All received segments will be written to the directory + * specified by basedir. This will also fetch any missing timeline history + * files. + * + * The stream_stop callback will be called every time data + * is received, and whenever a segment is completed. If it returns + * true, the streaming will stop and the function + * return. As long as it returns false, streaming will continue + * indefinitely. + * + * If stream_stop() checks for external input, stop_socket should be set to + * the FD it checks. This will allow such input to be detected promptly + * rather than after standby_message_timeout (which might be indefinite). + * Note that signals will interrupt waits for input as well, but that is + * race-y since a signal received while busy won't interrupt the wait. + * + * standby_message_timeout controls how often we send a message + * back to the primary letting it know our progress, in milliseconds. + * Zero means no messages are sent. + * This message will only contain the write location, and never + * flush or replay. + * + * If 'partial_suffix' is not NULL, files are initially created with the + * given suffix, and the suffix is removed once the file is finished. 
That + * allows you to tell the difference between partial and completed files, + * so that you can continue later where you left. + * + * If 'synchronous' is true, the received WAL is flushed as soon as written, + * otherwise only when the WAL file is closed. + * + * Note: The WAL location *must* be at a log segment start! + */ +bool +ReceiveXlogStream(PGconn *conn, StreamCtl *stream) +{ + char query[128]; + char slotcmd[128]; + PGresult *res; + XLogRecPtr stoppos; + + /* + * The caller should've checked the server version already, but doesn't do + * any harm to check it here too. + */ + if (!CheckServerVersionForStreaming(conn)) + return false; + + /* + * Decide whether we want to report the flush position. If we report the + * flush position, the primary will know what WAL we'll possibly + * re-request, and it can then remove older WAL safely. We must always do + * that when we are using slots. + * + * Reporting the flush position makes one eligible as a synchronous + * replica. People shouldn't include generic names in + * synchronous_standby_names, but we've protected them against it so far, + * so let's continue to do so unless specifically requested. 
+ */ + if (stream->replication_slot != NULL) + { + reportFlushPosition = true; + sprintf(slotcmd, "SLOT \"%s\" ", stream->replication_slot); + } + else + { + if (stream->synchronous) + reportFlushPosition = true; + else + reportFlushPosition = false; + slotcmd[0] = 0; + } + + if (stream->sysidentifier != NULL) + { + /* Validate system identifier hasn't changed */ + res = PQexec(conn, "IDENTIFY_SYSTEM"); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + pg_log_error("could not send replication command \"%s\": %s", + "IDENTIFY_SYSTEM", PQerrorMessage(conn)); + PQclear(res); + return false; + } + if (PQntuples(res) != 1 || PQnfields(res) < 3) + { + pg_log_error("could not identify system: got %d rows and %d fields, expected %d rows and %d or more fields", + PQntuples(res), PQnfields(res), 1, 3); + PQclear(res); + return false; + } + if (strcmp(stream->sysidentifier, PQgetvalue(res, 0, 0)) != 0) + { + pg_log_error("system identifier does not match between base backup and streaming connection"); + PQclear(res); + return false; + } + if (stream->timeline > atoi(PQgetvalue(res, 0, 1))) + { + pg_log_error("starting timeline %u is not present in the server", + stream->timeline); + PQclear(res); + return false; + } + PQclear(res); + } + + /* + * initialize flush position to starting point, it's the caller's + * responsibility that that's sane. + */ + lastFlushPosition = stream->startpos; + + while (1) + { + /* + * Fetch the timeline history file for this timeline, if we don't have + * it already. When streaming log to tar, this will always return + * false, as we are never streaming into an existing file and + * therefore there can be no pre-existing timeline history file. 
+ */ + if (!existsTimeLineHistoryFile(stream)) + { + snprintf(query, sizeof(query), "TIMELINE_HISTORY %u", stream->timeline); + res = PQexec(conn, query); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + /* FIXME: we might send it ok, but get an error */ + pg_log_error("could not send replication command \"%s\": %s", + "TIMELINE_HISTORY", PQresultErrorMessage(res)); + PQclear(res); + return false; + } + + /* + * The response to TIMELINE_HISTORY is a single row result set + * with two fields: filename and content + */ + if (PQnfields(res) != 2 || PQntuples(res) != 1) + { + pg_log_warning("unexpected response to TIMELINE_HISTORY command: got %d rows and %d fields, expected %d rows and %d fields", + PQntuples(res), PQnfields(res), 1, 2); + } + + /* Write the history file to disk */ + writeTimeLineHistoryFile(stream, + PQgetvalue(res, 0, 0), + PQgetvalue(res, 0, 1)); + + PQclear(res); + } + + /* + * Before we start streaming from the requested location, check if the + * callback tells us to stop here. + */ + if (stream->stream_stop(stream->startpos, stream->timeline, false)) + return true; + + /* Initiate the replication stream at specified location */ + snprintf(query, sizeof(query), "START_REPLICATION %s%X/%X TIMELINE %u", + slotcmd, + LSN_FORMAT_ARGS(stream->startpos), + stream->timeline); + res = PQexec(conn, query); + if (PQresultStatus(res) != PGRES_COPY_BOTH) + { + pg_log_error("could not send replication command \"%s\": %s", + "START_REPLICATION", PQresultErrorMessage(res)); + PQclear(res); + return false; + } + PQclear(res); + + /* Stream the WAL */ + res = HandleCopyStream(conn, stream, &stoppos); + if (res == NULL) + goto error; + + /* + * Streaming finished. + * + * There are two possible reasons for that: a controlled shutdown, or + * we reached the end of the current timeline. In case of + * end-of-timeline, the server sends a result set after Copy has + * finished, containing information about the next timeline. 
Read + * that, and restart streaming from the next timeline. In case of + * controlled shutdown, stop here. + */ + if (PQresultStatus(res) == PGRES_TUPLES_OK) + { + /* + * End-of-timeline. Read the next timeline's ID and starting + * position. Usually, the starting position will match the end of + * the previous timeline, but there are corner cases like if the + * server had sent us half of a WAL record, when it was promoted. + * The new timeline will begin at the end of the last complete + * record in that case, overlapping the partial WAL record on the + * old timeline. + */ + uint32 newtimeline; + bool parsed; + + parsed = ReadEndOfStreamingResult(res, &stream->startpos, &newtimeline); + PQclear(res); + if (!parsed) + goto error; + + /* Sanity check the values the server gave us */ + if (newtimeline <= stream->timeline) + { + pg_log_error("server reported unexpected next timeline %u, following timeline %u", + newtimeline, stream->timeline); + goto error; + } + if (stream->startpos > stoppos) + { + pg_log_error("server stopped streaming timeline %u at %X/%X, but reported next timeline %u to begin at %X/%X", + stream->timeline, LSN_FORMAT_ARGS(stoppos), + newtimeline, LSN_FORMAT_ARGS(stream->startpos)); + goto error; + } + + /* Read the final result, which should be CommandComplete. */ + res = PQgetResult(conn); + if (PQresultStatus(res) != PGRES_COMMAND_OK) + { + pg_log_error("unexpected termination of replication stream: %s", + PQresultErrorMessage(res)); + PQclear(res); + goto error; + } + PQclear(res); + + /* + * Loop back to start streaming from the new timeline. Always + * start streaming at the beginning of a segment. + */ + stream->timeline = newtimeline; + stream->startpos = stream->startpos - + XLogSegmentOffset(stream->startpos, WalSegSz); + continue; + } + else if (PQresultStatus(res) == PGRES_COMMAND_OK) + { + PQclear(res); + + /* + * End of replication (ie. controlled shut down of the server). 
+ * + * Check if the callback thinks it's OK to stop here. If not, + * complain. + */ + if (stream->stream_stop(stoppos, stream->timeline, false)) + return true; + else + { + pg_log_error("replication stream was terminated before stop point"); + goto error; + } + } + else + { + /* Server returned an error. */ + pg_log_error("unexpected termination of replication stream: %s", + PQresultErrorMessage(res)); + PQclear(res); + goto error; + } + } + +error: + if (walfile != NULL && stream->walmethod->close(walfile, CLOSE_NO_RENAME) != 0) + pg_log_error("could not close file \"%s\": %s", + current_walfile_name, stream->walmethod->getlasterror()); + walfile = NULL; + return false; +} + +/* + * Helper function to parse the result set returned by server after streaming + * has finished. On failure, prints an error to stderr and returns false. + */ +static bool +ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos, uint32 *timeline) +{ + uint32 startpos_xlogid, + startpos_xrecoff; + + /*---------- + * The result set consists of one row and two columns, e.g: + * + * next_tli | next_tli_startpos + * ----------+------------------- + * 4 | 0/9949AE0 + * + * next_tli is the timeline ID of the next timeline after the one that + * just finished streaming. next_tli_startpos is the WAL location where + * the server switched to it. + *---------- + */ + if (PQnfields(res) < 2 || PQntuples(res) != 1) + { + pg_log_error("unexpected result set after end-of-timeline: got %d rows and %d fields, expected %d rows and %d fields", + PQntuples(res), PQnfields(res), 1, 2); + return false; + } + + *timeline = atoi(PQgetvalue(res, 0, 0)); + if (sscanf(PQgetvalue(res, 0, 1), "%X/%X", &startpos_xlogid, + &startpos_xrecoff) != 2) + { + pg_log_error("could not parse next timeline's starting point \"%s\"", + PQgetvalue(res, 0, 1)); + return false; + } + *startpos = ((uint64) startpos_xlogid << 32) | startpos_xrecoff; + + return true; +} + +/* + * The main loop of ReceiveXlogStream. 
Handles the COPY stream after + * initiating streaming with the START_REPLICATION command. + * + * If the COPY ends (not necessarily successfully) due a message from the + * server, returns a PGresult and sets *stoppos to the last byte written. + * On any other sort of error, returns NULL. + */ +static PGresult * +HandleCopyStream(PGconn *conn, StreamCtl *stream, + XLogRecPtr *stoppos) +{ + char *copybuf = NULL; + TimestampTz last_status = -1; + XLogRecPtr blockpos = stream->startpos; + + still_sending = true; + + while (1) + { + int r; + TimestampTz now; + long sleeptime; + + /* + * Check if we should continue streaming, or abort at this point. + */ + if (!CheckCopyStreamStop(conn, stream, blockpos)) + goto error; + + now = feGetCurrentTimestamp(); + + /* + * If synchronous option is true, issue sync command as soon as there + * are WAL data which has not been flushed yet. + */ + if (stream->synchronous && lastFlushPosition < blockpos && walfile != NULL) + { + if (stream->walmethod->sync(walfile) != 0) + { + pg_log_fatal("could not fsync file \"%s\": %s", + current_walfile_name, stream->walmethod->getlasterror()); + exit(1); + } + lastFlushPosition = blockpos; + + /* + * Send feedback so that the server sees the latest WAL locations + * immediately. + */ + if (!sendFeedback(conn, blockpos, now, false)) + goto error; + last_status = now; + } + + /* + * Potentially send a status message to the primary + */ + if (still_sending && stream->standby_message_timeout > 0 && + feTimestampDifferenceExceeds(last_status, now, + stream->standby_message_timeout)) + { + /* Time to send feedback! 
*/ + if (!sendFeedback(conn, blockpos, now, false)) + goto error; + last_status = now; + } + + /* + * Calculate how long send/receive loops should sleep + */ + sleeptime = CalculateCopyStreamSleeptime(now, stream->standby_message_timeout, + last_status); + + r = CopyStreamReceive(conn, sleeptime, stream->stop_socket, ©buf); + while (r != 0) + { + if (r == -1) + goto error; + if (r == -2) + { + PGresult *res = HandleEndOfCopyStream(conn, stream, copybuf, blockpos, stoppos); + + if (res == NULL) + goto error; + else + return res; + } + + /* Check the message type. */ + if (copybuf[0] == 'k') + { + if (!ProcessKeepaliveMsg(conn, stream, copybuf, r, blockpos, + &last_status)) + goto error; + } + else if (copybuf[0] == 'w') + { + if (!ProcessXLogDataMsg(conn, stream, copybuf, r, &blockpos)) + goto error; + + /* + * Check if we should continue streaming, or abort at this + * point. + */ + if (!CheckCopyStreamStop(conn, stream, blockpos)) + goto error; + } + else + { + pg_log_error("unrecognized streaming header: \"%c\"", + copybuf[0]); + goto error; + } + + /* + * Process the received data, and any subsequent data we can read + * without blocking. + */ + r = CopyStreamReceive(conn, 0, stream->stop_socket, ©buf); + } + } + +error: + if (copybuf != NULL) + PQfreemem(copybuf); + return NULL; +} + +/* + * Wait until we can read a CopyData message, + * or timeout, or occurrence of a signal or input on the stop_socket. + * (timeout_ms < 0 means wait indefinitely; 0 means don't wait.) + * + * Returns 1 if data has become available for reading, 0 if timed out + * or interrupted by signal or stop_socket input, and -1 on an error. 
+ */ +static int +CopyStreamPoll(PGconn *conn, long timeout_ms, pgsocket stop_socket) +{ + int ret; + fd_set input_mask; + int connsocket; + int maxfd; + struct timeval timeout; + struct timeval *timeoutptr; + + connsocket = PQsocket(conn); + if (connsocket < 0) + { + pg_log_error("invalid socket: %s", PQerrorMessage(conn)); + return -1; + } + + FD_ZERO(&input_mask); + FD_SET(connsocket, &input_mask); + maxfd = connsocket; + if (stop_socket != PGINVALID_SOCKET) + { + FD_SET(stop_socket, &input_mask); + maxfd = Max(maxfd, stop_socket); + } + + if (timeout_ms < 0) + timeoutptr = NULL; + else + { + timeout.tv_sec = timeout_ms / 1000L; + timeout.tv_usec = (timeout_ms % 1000L) * 1000L; + timeoutptr = &timeout; + } + + ret = select(maxfd + 1, &input_mask, NULL, NULL, timeoutptr); + + if (ret < 0) + { + if (errno == EINTR) + return 0; /* Got a signal, so not an error */ + pg_log_error("%s() failed: %m", "select"); + return -1; + } + if (ret > 0 && FD_ISSET(connsocket, &input_mask)) + return 1; /* Got input on connection socket */ + + return 0; /* Got timeout or input on stop_socket */ +} + +/* + * Receive CopyData message available from XLOG stream, blocking for + * maximum of 'timeout' ms. + * + * If data was received, returns the length of the data. *buffer is set to + * point to a buffer holding the received message. The buffer is only valid + * until the next CopyStreamReceive call. + * + * Returns 0 if no data was available within timeout, or if wait was + * interrupted by signal or stop_socket input. + * -1 on error. -2 if the server ended the COPY. + */ +static int +CopyStreamReceive(PGconn *conn, long timeout, pgsocket stop_socket, + char **buffer) +{ + char *copybuf = NULL; + int rawlen; + + if (*buffer != NULL) + PQfreemem(*buffer); + *buffer = NULL; + + /* Try to receive a CopyData message */ + rawlen = PQgetCopyData(conn, ©buf, 1); + if (rawlen == 0) + { + int ret; + + /* + * No data available. 
Wait for some to appear, but not longer than + * the specified timeout, so that we can ping the server. Also stop + * waiting if input appears on stop_socket. + */ + ret = CopyStreamPoll(conn, timeout, stop_socket); + if (ret <= 0) + return ret; + + /* Now there is actually data on the socket */ + if (PQconsumeInput(conn) == 0) + { + pg_log_error("could not receive data from WAL stream: %s", + PQerrorMessage(conn)); + return -1; + } + + /* Now that we've consumed some input, try again */ + rawlen = PQgetCopyData(conn, ©buf, 1); + if (rawlen == 0) + return 0; + } + if (rawlen == -1) /* end-of-streaming or error */ + return -2; + if (rawlen == -2) + { + pg_log_error("could not read COPY data: %s", PQerrorMessage(conn)); + return -1; + } + + /* Return received messages to caller */ + *buffer = copybuf; + return rawlen; +} + +/* + * Process the keepalive message. + */ +static bool +ProcessKeepaliveMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, + XLogRecPtr blockpos, TimestampTz *last_status) +{ + int pos; + bool replyRequested; + TimestampTz now; + + /* + * Parse the keepalive message, enclosed in the CopyData message. We just + * check if the server requested a reply, and ignore the rest. + */ + pos = 1; /* skip msgtype 'k' */ + pos += 8; /* skip walEnd */ + pos += 8; /* skip sendTime */ + + if (len < pos + 1) + { + pg_log_error("streaming header too small: %d", len); + return false; + } + replyRequested = copybuf[pos]; + + /* If the server requested an immediate reply, send one. */ + if (replyRequested && still_sending) + { + if (reportFlushPosition && lastFlushPosition < blockpos && + walfile != NULL) + { + /* + * If a valid flush location needs to be reported, flush the + * current WAL file so that the latest flush location is sent back + * to the server. This is necessary to see whether the last WAL + * data has been successfully replicated or not, at the normal + * shutdown of the server. 
+ */ + if (stream->walmethod->sync(walfile) != 0) + { + pg_log_fatal("could not fsync file \"%s\": %s", + current_walfile_name, stream->walmethod->getlasterror()); + exit(1); + } + lastFlushPosition = blockpos; + } + + now = feGetCurrentTimestamp(); + if (!sendFeedback(conn, blockpos, now, false)) + return false; + *last_status = now; + } + + return true; +} + +/* + * Process XLogData message. + */ +static bool +ProcessXLogDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, + XLogRecPtr *blockpos) +{ + int xlogoff; + int bytes_left; + int bytes_written; + int hdr_len; + + /* + * Once we've decided we don't want to receive any more, just ignore any + * subsequent XLogData messages. + */ + if (!(still_sending)) + return true; + + /* + * Read the header of the XLogData message, enclosed in the CopyData + * message. We only need the WAL location field (dataStart), the rest of + * the header is ignored. + */ + hdr_len = 1; /* msgtype 'w' */ + hdr_len += 8; /* dataStart */ + hdr_len += 8; /* walEnd */ + hdr_len += 8; /* sendTime */ + if (len < hdr_len) + { + pg_log_error("streaming header too small: %d", len); + return false; + } + *blockpos = fe_recvint64(©buf[1]); + + /* Extract WAL location for this block */ + xlogoff = XLogSegmentOffset(*blockpos, WalSegSz); + + /* + * Verify that the initial location in the stream matches where we think + * we are. 
+ */ + if (walfile == NULL) + { + /* No file open yet */ + if (xlogoff != 0) + { + pg_log_error("received write-ahead log record for offset %u with no file open", + xlogoff); + return false; + } + } + else + { + /* More data in existing segment */ + if (stream->walmethod->get_current_pos(walfile) != xlogoff) + { + pg_log_error("got WAL data offset %08x, expected %08x", + xlogoff, (int) stream->walmethod->get_current_pos(walfile)); + return false; + } + } + + bytes_left = len - hdr_len; + bytes_written = 0; + + while (bytes_left) + { + int bytes_to_write; + + /* + * If crossing a WAL boundary, only write up until we reach wal + * segment size. + */ + if (xlogoff + bytes_left > WalSegSz) + bytes_to_write = WalSegSz - xlogoff; + else + bytes_to_write = bytes_left; + + if (walfile == NULL) + { + if (!open_walfile(stream, *blockpos)) + { + /* Error logged by open_walfile */ + return false; + } + } + + if (stream->walmethod->write(walfile, copybuf + hdr_len + bytes_written, + bytes_to_write) != bytes_to_write) + { + pg_log_error("could not write %u bytes to WAL file \"%s\": %s", + bytes_to_write, current_walfile_name, + stream->walmethod->getlasterror()); + return false; + } + + /* Write was successful, advance our position */ + bytes_written += bytes_to_write; + bytes_left -= bytes_to_write; + *blockpos += bytes_to_write; + xlogoff += bytes_to_write; + + /* Did we reach the end of a WAL segment? 
*/ + if (XLogSegmentOffset(*blockpos, WalSegSz) == 0) + { + if (!close_walfile(stream, *blockpos)) + /* Error message written in close_walfile() */ + return false; + + xlogoff = 0; + + if (still_sending && stream->stream_stop(*blockpos, stream->timeline, true)) + { + if (PQputCopyEnd(conn, NULL) <= 0 || PQflush(conn)) + { + pg_log_error("could not send copy-end packet: %s", + PQerrorMessage(conn)); + return false; + } + still_sending = false; + return true; /* ignore the rest of this XLogData packet */ + } + } + } + /* No more data left to write, receive next copy packet */ + + return true; +} + +/* + * Handle end of the copy stream. + */ +static PGresult * +HandleEndOfCopyStream(PGconn *conn, StreamCtl *stream, char *copybuf, + XLogRecPtr blockpos, XLogRecPtr *stoppos) +{ + PGresult *res = PQgetResult(conn); + + /* + * The server closed its end of the copy stream. If we haven't closed + * ours already, we need to do so now, unless the server threw an error, + * in which case we don't. + */ + if (still_sending) + { + if (!close_walfile(stream, blockpos)) + { + /* Error message written in close_walfile() */ + PQclear(res); + return NULL; + } + if (PQresultStatus(res) == PGRES_COPY_IN) + { + if (PQputCopyEnd(conn, NULL) <= 0 || PQflush(conn)) + { + pg_log_error("could not send copy-end packet: %s", + PQerrorMessage(conn)); + PQclear(res); + return NULL; + } + res = PQgetResult(conn); + } + still_sending = false; + } + if (copybuf != NULL) + PQfreemem(copybuf); + *stoppos = blockpos; + return res; +} + +/* + * Check if we should continue streaming, or abort at this point. 
+ */ +static bool +CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos) +{ + if (still_sending && stream->stream_stop(blockpos, stream->timeline, false)) + { + if (!close_walfile(stream, blockpos)) + { + /* Potential error message is written by close_walfile */ + return false; + } + if (PQputCopyEnd(conn, NULL) <= 0 || PQflush(conn)) + { + pg_log_error("could not send copy-end packet: %s", + PQerrorMessage(conn)); + return false; + } + still_sending = false; + } + + return true; +} + +/* + * Calculate how long send/receive loops should sleep + */ +static long +CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout, + TimestampTz last_status) +{ + TimestampTz status_targettime = 0; + long sleeptime; + + if (standby_message_timeout && still_sending) + status_targettime = last_status + + (standby_message_timeout - 1) * ((int64) 1000); + + if (status_targettime > 0) + { + long secs; + int usecs; + + feTimestampDifference(now, + status_targettime, + &secs, + &usecs); + /* Always sleep at least 1 sec */ + if (secs <= 0) + { + secs = 1; + usecs = 0; + } + + sleeptime = secs * 1000 + usecs / 1000; + } + else + sleeptime = -1; + + return sleeptime; +} diff --git a/src/compatibility/receivelog.h b/src/compatibility/receivelog.h new file mode 100644 index 000000000..e04333bf8 --- /dev/null +++ b/src/compatibility/receivelog.h @@ -0,0 +1,57 @@ +/*------------------------------------------------------------------------- + * + * receivelog.h + * + * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/bin/pg_basebackup/receivelog.h + *------------------------------------------------------------------------- + */ + +#ifndef RECEIVELOG_H +#define RECEIVELOG_H + +#include "access/xlogdefs.h" +#include "libpq-fe.h" +#include "walmethods.h" + +/* + * Called before trying to read more data or when a segment is + * finished. Return true to stop streaming. 
+ */ +typedef bool (*stream_stop_callback) (XLogRecPtr segendpos, uint32 timeline, bool segment_finished); + +/* + * Global parameters when receiving xlog stream. For details about the individual fields, + * see the function comment for ReceiveXlogStream(). + */ +typedef struct StreamCtl +{ + XLogRecPtr startpos; /* Start position for streaming */ + TimeLineID timeline; /* Timeline to stream data from */ + char *sysidentifier; /* Validate this system identifier and + * timeline */ + int standby_message_timeout; /* Send status messages this often */ + bool synchronous; /* Flush immediately WAL data on write */ + bool mark_done; /* Mark segment as done in generated archive */ + bool do_sync; /* Flush to disk to ensure consistent state of + * data */ + + stream_stop_callback stream_stop; /* Stop streaming when returns true */ + + pgsocket stop_socket; /* if valid, watch for input on this socket + * and check stream_stop() when there is any */ + + WalWriteMethod *walmethod; /* How to write the WAL */ + char *partial_suffix; /* Suffix appended to partially received files */ + char *replication_slot; /* Replication slot to use, or NULL */ +} StreamCtl; + + + +extern bool CheckServerVersionForStreaming(PGconn *conn); +extern bool ReceiveXlogStream(PGconn *conn, + StreamCtl *stream); + +#endif /* RECEIVELOG_H */ diff --git a/src/compatibility/streamutil.c b/src/compatibility/streamutil.c new file mode 100644 index 000000000..187cc270e --- /dev/null +++ b/src/compatibility/streamutil.c @@ -0,0 +1,281 @@ +/*------------------------------------------------------------------------- + * + * streamutil.c - utility functions for pg_basebackup, pg_receivewal and + * pg_recvlogical + * + * Author: Magnus Hagander + * + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/bin/pg_basebackup/streamutil.c + *------------------------------------------------------------------------- + */ + +#include "postgres_fe.h" + +#include +#include + 
+#include "common/connect.h" +#include "common/fe_memutils.h" +#include "logging.h" +#include "datatype/timestamp.h" +#include "port/pg_bswap.h" +#include "pqexpbuffer.h" +#include "receivelog.h" +#include "streamutil.h" + + +#define ERRCODE_DUPLICATE_OBJECT "42710" + +uint32 WalSegSz; + +#include "simple_prompt.h" +#include "file_compat.h" + +#include + +/* + * From version 10, explicitly set wal segment size using SHOW wal_segment_size + * since ControlFile is not accessible here. + */ +bool +RetrieveWalSegSize(PGconn *conn) +{ + PGresult *res; + char xlog_unit[3]; + int xlog_val, + multiplier = 1; + + /* check connection existence */ + Assert(conn != NULL); + + res = PQexec(conn, "SHOW wal_segment_size"); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + pg_log_error("could not send replication command \"%s\": %s", + "SHOW wal_segment_size", PQerrorMessage(conn)); + + PQclear(res); + return false; + } + if (PQntuples(res) != 1 || PQnfields(res) < 1) + { + pg_log_error("could not fetch WAL segment size: got %d rows and %d fields, expected %d rows and %d or more fields", + PQntuples(res), PQnfields(res), 1, 1); + + PQclear(res); + return false; + } + + /* fetch xlog value and unit from the result */ + if (sscanf(PQgetvalue(res, 0, 0), "%d%2s", &xlog_val, xlog_unit) != 2) + { + pg_log_error("WAL segment size could not be parsed"); + PQclear(res); + return false; + } + + PQclear(res); + xlog_unit[2] = 0; + /* set the multiplier based on unit to convert xlog_val to bytes */ + if (strcmp(xlog_unit, "MB") == 0) + multiplier = 1024 * 1024; + else if (strcmp(xlog_unit, "GB") == 0) + multiplier = 1024 * 1024 * 1024; + + /* convert and set WalSegSz */ + WalSegSz = xlog_val * multiplier; + + if (!IsValidWalSegSize(WalSegSz)) + { + pg_log_error(ngettext("WAL segment size must be a power of two between 1 MB and 1 GB, but the remote server reported a value of %d byte", + "WAL segment size must be a power of two between 1 MB and 1 GB, but the remote server reported a value of 
%d bytes", + WalSegSz), + WalSegSz); + return false; + } + + return true; +} + + +/* + * Create a replication slot for the given connection. This function + * returns true in case of success. + */ +bool +CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, + bool is_temporary, bool is_physical, bool reserve_wal, + bool slot_exists_ok) +{ + PQExpBuffer query; + PGresult *res; + + query = createPQExpBuffer(); + + Assert((is_physical && plugin == NULL) || + (!is_physical && plugin != NULL)); + Assert(slot_name != NULL); + + /* Build query */ + appendPQExpBuffer(query, "CREATE_REPLICATION_SLOT \"%s\"", slot_name); + if (is_temporary) + appendPQExpBufferStr(query, " TEMPORARY"); + if (is_physical) + { + appendPQExpBufferStr(query, " PHYSICAL"); + if (reserve_wal) + appendPQExpBufferStr(query, " RESERVE_WAL"); + } + else + { + appendPQExpBuffer(query, " LOGICAL \"%s\"", plugin); + if (PQserverVersion(conn) >= 100000) + /* pg_recvlogical doesn't use an exported snapshot, so suppress */ + appendPQExpBufferStr(query, " NOEXPORT_SNAPSHOT"); + } + + res = PQexec(conn, query->data); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + const char *sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE); + + if (slot_exists_ok && + sqlstate && + strcmp(sqlstate, ERRCODE_DUPLICATE_OBJECT) == 0) + { + destroyPQExpBuffer(query); + PQclear(res); + return true; + } + else + { + pg_log_error("could not send replication command \"%s\": %s", + query->data, PQerrorMessage(conn)); + + destroyPQExpBuffer(query); + PQclear(res); + return false; + } + } + + if (PQntuples(res) != 1 || PQnfields(res) != 4) + { + pg_log_error("could not create replication slot \"%s\": got %d rows and %d fields, expected %d rows and %d fields", + slot_name, + PQntuples(res), PQnfields(res), 1, 4); + + destroyPQExpBuffer(query); + PQclear(res); + return false; + } + + destroyPQExpBuffer(query); + PQclear(res); + return true; +} + +/* + * Frontend version of GetCurrentTimestamp(), since we 
are not linked with + * backend code. + */ +TimestampTz +feGetCurrentTimestamp(void) +{ + TimestampTz result; + struct timeval tp; + + gettimeofday(&tp, NULL); + + result = (TimestampTz) tp.tv_sec - + ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY); + result = (result * USECS_PER_SEC) + tp.tv_usec; + + return result; +} + +/* + * Frontend version of TimestampDifference(), since we are not linked with + * backend code. + */ +void +feTimestampDifference(TimestampTz start_time, TimestampTz stop_time, + long *secs, int *microsecs) +{ + TimestampTz diff = stop_time - start_time; + + if (diff <= 0) + { + *secs = 0; + *microsecs = 0; + } + else + { + *secs = (long) (diff / USECS_PER_SEC); + *microsecs = (int) (diff % USECS_PER_SEC); + } +} + +/* + * Frontend version of TimestampDifferenceExceeds(), since we are not + * linked with backend code. + */ +bool +feTimestampDifferenceExceeds(TimestampTz start_time, + TimestampTz stop_time, + int msec) +{ + TimestampTz diff = stop_time - start_time; + + return (diff >= msec * INT64CONST(1000)); +} + +/* + * Converts an int64 to network byte order. + */ + +void +fe_sendint64(int64 i, char *buf) +{ + + uint32 n32; + + /* High order half first, since we're doing MSB-first */ + n32 = (uint32) (i >> 32); + n32 = htonl(n32); + memcpy(&buf[0], &n32, 4); + + /* Now the low order half */ + n32 = (uint32) i; + n32 = htonl(n32); + memcpy(&buf[4], &n32, 4); +} + +/* + * Converts an int64 from network byte order to native format. 
+ */ + +int64 +fe_recvint64(char *buf) +{ + int64 result; + uint32 h32; + uint32 l32; + + memcpy(&h32, buf, 4); + memcpy(&l32, buf + 4, 4); + h32 = ntohl(h32); + l32 = ntohl(l32); + + result = h32; + result <<= 32; + result |= l32; + + return result; + +} + + + diff --git a/src/compatibility/streamutil.h b/src/compatibility/streamutil.h new file mode 100644 index 000000000..3a2feaa39 --- /dev/null +++ b/src/compatibility/streamutil.h @@ -0,0 +1,36 @@ +/*------------------------------------------------------------------------- + * + * streamutil.h + * + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/bin/pg_basebackup/streamutil.h + *------------------------------------------------------------------------- + */ + +#ifndef STREAMUTIL_H +#define STREAMUTIL_H + +#include "access/xlogdefs.h" +#include "datatype/timestamp.h" +#include "libpq-fe.h" + +extern uint32 WalSegSz; + +/* Replication commands */ +extern bool CreateReplicationSlot(PGconn *conn, const char *slot_name, + const char *plugin, bool is_temporary, + bool is_physical, bool reserve_wal, + bool slot_exists_ok); +extern bool RetrieveWalSegSize(PGconn *conn); +extern TimestampTz feGetCurrentTimestamp(void); +extern void feTimestampDifference(TimestampTz start_time, TimestampTz stop_time, + long *secs, int *microsecs); + +extern bool feTimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, + int msec); +extern void fe_sendint64(int64 i, char *buf); +extern int64 fe_recvint64(char *buf); + +#endif /* STREAMUTIL_H */ diff --git a/src/compatibility/walmethods.c b/src/compatibility/walmethods.c new file mode 100644 index 000000000..29eb08f68 --- /dev/null +++ b/src/compatibility/walmethods.c @@ -0,0 +1,467 @@ +/*------------------------------------------------------------------------- + * + * walmethods.c - implementations of different ways to write received wal + * + * NOTE! 
The caller must ensure that only one method is instantiated in + * any given program, and that it's only instantiated once! + * + * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/bin/pg_basebackup/walmethods.c + *------------------------------------------------------------------------- + */ + +#include "pg_probackup.h" +#include "postgres_fe.h" + +#include +#include +#include +#ifdef HAVE_LIBZ +#include +#endif + +#include "common/file_utils.h" +#include "pgtar.h" +#include "receivelog.h" +#include "streamutil.h" + +/* Size of zlib buffer for .tar.gz */ +#define ZLIB_OUT_SIZE 4096 + +#include "file_compat.h" + +#ifndef unconstify +#define unconstify(underlying_type, expr) \ + ((underlying_type) (expr)) +#endif + +/*------------------------------------------------------------------------- + * WalDirectoryMethod - write wal to a directory looking like pg_wal + *------------------------------------------------------------------------- + */ + +/* + * Global static data for this method + */ +typedef struct DirectoryMethodData +{ + char *basedir; + int compression; + bool sync; + const char *lasterrstring; /* if set, takes precedence over lasterrno */ + int lasterrno; +} DirectoryMethodData; +static DirectoryMethodData *dir_data = NULL; + +/* + * Local file handle + */ +typedef struct DirectoryMethodFile +{ + int fd; + off_t currpos; + char *pathname; + char *fullpath; + char *temp_suffix; +#ifdef HAVE_LIBZ + gzFile gzfp; +#endif +} DirectoryMethodFile; + +#define dir_clear_error() \ + (dir_data->lasterrstring = NULL, dir_data->lasterrno = 0) +#define dir_set_error(msg) \ + (dir_data->lasterrstring = _(msg)) + +static const char * +dir_getlasterror(void) +{ + if (dir_data->lasterrstring) + return dir_data->lasterrstring; + return strerror(dir_data->lasterrno); +} + +static char * +dir_get_file_name(const char *pathname, const char *temp_suffix) +{ + char *filename = pg_malloc0(MAXPGPATH * sizeof(char)); + + 
snprintf(filename, MAXPGPATH, "%s%s%s", + pathname, dir_data->compression > 0 ? ".gz" : "", + temp_suffix ? temp_suffix : ""); + + return filename; +} + +static Walfile +dir_open_for_write(const char *pathname, const char *temp_suffix, size_t pad_to_size) +{ + char tmppath[MAXPGPATH]; + char *filename; + int fd; + DirectoryMethodFile *f; +#ifdef HAVE_LIBZ + gzFile gzfp = NULL; +#endif + + dir_clear_error(); + + filename = dir_get_file_name(pathname, temp_suffix); + snprintf(tmppath, sizeof(tmppath), "%s/%s", + dir_data->basedir, filename); + pg_free(filename); + + /* + * Open a file for non-compressed as well as compressed files. Tracking + * the file descriptor is important for dir_sync() method as gzflush() + * does not do any system calls to fsync() to make changes permanent on + * disk. + */ + fd = open(tmppath, O_WRONLY | O_CREAT | PG_BINARY, pg_file_create_mode); + if (fd < 0) + { + dir_data->lasterrno = errno; + return NULL; + } + +#ifdef HAVE_LIBZ + if (dir_data->compression > 0) + { + gzfp = gzdopen(fd, "wb"); + if (gzfp == NULL) + { + dir_data->lasterrno = errno; + close(fd); + return NULL; + } + + if (gzsetparams(gzfp, dir_data->compression, + Z_DEFAULT_STRATEGY) != Z_OK) + { + dir_data->lasterrno = errno; + gzclose(gzfp); + return NULL; + } + } +#endif + + /* Do pre-padding on non-compressed files */ + if (pad_to_size && dir_data->compression == 0) + { + PGAlignedXLogBlock zerobuf; + int bytes; + + memset(zerobuf.data, 0, XLOG_BLCKSZ); + for (bytes = 0; bytes < pad_to_size; bytes += XLOG_BLCKSZ) + { + errno = 0; + if (write(fd, zerobuf.data, XLOG_BLCKSZ) != XLOG_BLCKSZ) + { + /* If write didn't set errno, assume problem is no disk space */ + dir_data->lasterrno = errno ? errno : ENOSPC; + close(fd); + return NULL; + } + } + + if (lseek(fd, 0, SEEK_SET) != 0) + { + dir_data->lasterrno = errno; + close(fd); + return NULL; + } + } + + /* + * fsync WAL file and containing directory, to ensure the file is + * persistently created and zeroed (if padded). 
That's particularly + * important when using synchronous mode, where the file is modified and + * fsynced in-place, without a directory fsync. + */ + if (dir_data->sync) + { + if (fsync_fname_compat(tmppath, false) != 0 || + fsync_parent_path_compat(tmppath) != 0) + { + dir_data->lasterrno = errno; +#ifdef HAVE_LIBZ + if (dir_data->compression > 0) + gzclose(gzfp); + else +#endif + close(fd); + return NULL; + } + } + + f = pg_malloc0(sizeof(DirectoryMethodFile)); +#ifdef HAVE_LIBZ + if (dir_data->compression > 0) + f->gzfp = gzfp; +#endif + f->fd = fd; + f->currpos = 0; + f->pathname = pg_strdup(pathname); + f->fullpath = pg_strdup(tmppath); + if (temp_suffix) + f->temp_suffix = pg_strdup(temp_suffix); + + return f; +} + +static ssize_t +dir_write(Walfile f, const void *buf, size_t count) +{ + ssize_t r; + DirectoryMethodFile *df = (DirectoryMethodFile *) f; + + Assert(f != NULL); + dir_clear_error(); + +#ifdef HAVE_LIBZ + if (dir_data->compression > 0) + { + errno = 0; + r = (ssize_t) gzwrite(df->gzfp, buf, count); + if (r != count) + { + /* If write didn't set errno, assume problem is no disk space */ + dir_data->lasterrno = errno ? errno : ENOSPC; + } + } + else +#endif + { + errno = 0; + r = write(df->fd, buf, count); + if (r != count) + { + /* If write didn't set errno, assume problem is no disk space */ + dir_data->lasterrno = errno ? 
errno : ENOSPC; + } + } + if (r > 0) + df->currpos += r; + return r; +} + +static off_t +dir_get_current_pos(Walfile f) +{ + Assert(f != NULL); + dir_clear_error(); + + /* Use a cached value to prevent lots of reseeks */ + return ((DirectoryMethodFile *) f)->currpos; +} + +static int +dir_close(Walfile f, WalCloseMethod method) +{ + int r; + DirectoryMethodFile *df = (DirectoryMethodFile *) f; + char tmppath[MAXPGPATH]; + char tmppath2[MAXPGPATH]; + + Assert(f != NULL); + dir_clear_error(); + +#ifdef HAVE_LIBZ + if (dir_data->compression > 0) + { + errno = 0; /* in case gzclose() doesn't set it */ + r = gzclose(df->gzfp); + } + else +#endif + r = close(df->fd); + + if (r == 0) + { + /* Build path to the current version of the file */ + if (method == CLOSE_NORMAL && df->temp_suffix) + { + char *filename; + char *filename2; + + /* + * If we have a temp prefix, normal operation is to rename the + * file. + */ + filename = dir_get_file_name(df->pathname, df->temp_suffix); + snprintf(tmppath, sizeof(tmppath), "%s/%s", + dir_data->basedir, filename); + pg_free(filename); + + /* permanent name, so no need for the prefix */ + filename2 = dir_get_file_name(df->pathname, NULL); + snprintf(tmppath2, sizeof(tmppath2), "%s/%s", + dir_data->basedir, filename2); + pg_free(filename2); + r = durable_rename_compat(tmppath, tmppath2); + } + else if (method == CLOSE_UNLINK) + { + char *filename; + + /* Unlink the file once it's closed */ + filename = dir_get_file_name(df->pathname, df->temp_suffix); + snprintf(tmppath, sizeof(tmppath), "%s/%s", + dir_data->basedir, filename); + pg_free(filename); + r = unlink(tmppath); + } + else + { + /* + * Else either CLOSE_NORMAL and no temp suffix, or + * CLOSE_NO_RENAME. In this case, fsync the file and containing + * directory if sync mode is requested. 
+ */ + if (dir_data->sync) + { + r = fsync_fname_compat(df->fullpath, false); + if (r == 0) + r = fsync_parent_path_compat(df->fullpath); + } + } + } + + if (r != 0) + dir_data->lasterrno = errno; + + pg_free(df->pathname); + pg_free(df->fullpath); + if (df->temp_suffix) + pg_free(df->temp_suffix); + pg_free(df); + + return r; +} + +static int +dir_sync(Walfile f) +{ + int r; + + Assert(f != NULL); + dir_clear_error(); + + if (!dir_data->sync) + return 0; + +#ifdef HAVE_LIBZ + if (dir_data->compression > 0) + { + if (gzflush(((DirectoryMethodFile *) f)->gzfp, Z_SYNC_FLUSH) != Z_OK) + { + dir_data->lasterrno = errno; + return -1; + } + } +#endif + + r = fsync(((DirectoryMethodFile *) f)->fd); + if (r < 0) + dir_data->lasterrno = errno; + return r; +} + +static ssize_t +dir_get_file_size(const char *pathname) +{ + struct stat statbuf; + char tmppath[MAXPGPATH]; + + snprintf(tmppath, sizeof(tmppath), "%s/%s", + dir_data->basedir, pathname); + + if (stat(tmppath, &statbuf) != 0) + { + dir_data->lasterrno = errno; + return -1; + } + + return statbuf.st_size; +} + +static int +dir_compression(void) +{ + return dir_data->compression; +} + +static bool +dir_existsfile(const char *pathname) +{ + char tmppath[MAXPGPATH]; + int fd; + + dir_clear_error(); + + snprintf(tmppath, sizeof(tmppath), "%s/%s", + dir_data->basedir, pathname); + + fd = open(tmppath, O_RDONLY | PG_BINARY, 0); + if (fd < 0) + return false; + close(fd); + return true; +} + +static bool +dir_finish(void) +{ + dir_clear_error(); + + if (dir_data->sync) + { + /* + * Files are fsynced when they are closed, but we need to fsync the + * directory entry here as well. 
+ */ + if (fsync_fname_compat(dir_data->basedir, true) != 0) + { + dir_data->lasterrno = errno; + return false; + } + } + return true; +} + + +WalWriteMethod * +CreateWalDirectoryMethod(const char *basedir, int compression, bool sync) +{ + WalWriteMethod *method; + + method = pg_malloc0(sizeof(WalWriteMethod)); + method->open_for_write = dir_open_for_write; + method->write = dir_write; + method->get_current_pos = dir_get_current_pos; + method->get_file_size = dir_get_file_size; + method->get_file_name = dir_get_file_name; + method->compression = dir_compression; + method->close = dir_close; + method->sync = dir_sync; + method->existsfile = dir_existsfile; + method->finish = dir_finish; + method->getlasterror = dir_getlasterror; + + dir_data = pg_malloc0(sizeof(DirectoryMethodData)); + dir_data->compression = compression; + dir_data->basedir = pg_strdup(basedir); + dir_data->sync = sync; + + return method; +} + +void +FreeWalDirectoryMethod(void) +{ + pg_free(dir_data->basedir); + pg_free(dir_data); + dir_data = NULL; +} diff --git a/src/compatibility/walmethods.h b/src/compatibility/walmethods.h new file mode 100644 index 000000000..e1e3aacf3 --- /dev/null +++ b/src/compatibility/walmethods.h @@ -0,0 +1,101 @@ +/*------------------------------------------------------------------------- + * + * walmethods.h + * + * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/bin/pg_basebackup/walmethods.h + *------------------------------------------------------------------------- + */ + + +typedef void *Walfile; + +typedef enum +{ + CLOSE_NORMAL, + CLOSE_UNLINK, + CLOSE_NO_RENAME +} WalCloseMethod; + +/* + * A WalWriteMethod structure represents the different methods used + * to write the streaming WAL as it's received. + * + * All methods that have a failure return indicator will set state + * allowing the getlasterror() method to return a suitable message. 
+ * Commonly, errno is this state (or part of it); so callers must take + * care not to clobber errno between a failed method call and use of + * getlasterror() to retrieve the message. + */ +typedef struct WalWriteMethod WalWriteMethod; +struct WalWriteMethod +{ + /* + * Open a target file. Returns Walfile, or NULL if open failed. If a temp + * suffix is specified, a file with that name will be opened, and then + * automatically renamed in close(). If pad_to_size is specified, the file + * will be padded with NUL up to that size, if supported by the Walmethod. + */ + Walfile (*open_for_write) (const char *pathname, const char *temp_suffix, size_t pad_to_size); + + /* + * Close an open Walfile, using one or more methods for handling automatic + * unlinking etc. Returns 0 on success, other values for error. + */ + int (*close) (Walfile f, WalCloseMethod method); + + /* Check if a file exist */ + bool (*existsfile) (const char *pathname); + + /* Return the size of a file, or -1 on failure. */ + ssize_t (*get_file_size) (const char *pathname); + + /* + * Return the name of the current file to work on in pg_malloc()'d string, + * without the base directory. This is useful for logging. + */ + char *(*get_file_name) (const char *pathname, const char *temp_suffix); + + /* Return the level of compression */ + int (*compression) (void); + + /* + * Write count number of bytes to the file, and return the number of bytes + * actually written or -1 for error. + */ + ssize_t (*write) (Walfile f, const void *buf, size_t count); + + /* Return the current position in a file or -1 on error */ + off_t (*get_current_pos) (Walfile f); + + /* + * fsync the contents of the specified file. Returns 0 on success. + */ + int (*sync) (Walfile f); + + /* + * Clean up the Walmethod, closing any shared resources. For methods like + * tar, this includes writing updated headers. 
Returns true if the + * close/write/sync of shared resources succeeded, otherwise returns false + * (but the resources are still closed). + */ + bool (*finish) (void); + + /* Return a text for the last error in this Walfile */ + const char *(*getlasterror) (void); +}; + +/* + * Available WAL methods: + * - WalDirectoryMethod - write WAL to regular files in a standard pg_wal + * - WalTarMethod - write WAL to a tarfile corresponding to pg_wal + * (only implements the methods required for pg_basebackup, + * not all those required for pg_receivewal) + */ +WalWriteMethod *CreateWalDirectoryMethod(const char *basedir, + int compression, bool sync); + +/* Cleanup routines for previously-created methods */ +void FreeWalDirectoryMethod(void); diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 1df7bffe6..9a59ff3ab 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -324,13 +324,11 @@ main(int argc, char *argv[]) my_pid = getpid(); //set_pglocale_pgservice(argv[0], "pgscripts"); -#if PG_VERSION_NUM >= 110000 /* * Reset WAL segment size, we will retreive it using RetrieveWalSegSize() * later. */ WalSegSz = 0; -#endif /* * Save main thread's tid. It is used call exit() in case of errors. @@ -720,14 +718,6 @@ main(int argc, char *argv[]) if (!instance_config.conn_opt.pghost && instance_config.remote.host) instance_config.conn_opt.pghost = instance_config.remote.host; - /* Setup stream options. They are used in streamutil.c. 
*/ - if (instance_config.conn_opt.pghost != NULL) - dbhost = pstrdup(instance_config.conn_opt.pghost); - if (instance_config.conn_opt.pgport != NULL) - dbport = pstrdup(instance_config.conn_opt.pgport); - if (instance_config.conn_opt.pguser != NULL) - dbuser = pstrdup(instance_config.conn_opt.pguser); - if (backup_subcmd == VALIDATE_CMD || backup_subcmd == RESTORE_CMD) { /* diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 5799bda0c..1b24d96c9 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -16,6 +16,8 @@ #include "libpq-fe.h" #include "access/xlog_internal.h" +#include "file_compat.h" + #include "utils/pg_crc.h" #include "catalog/pg_control.h" @@ -684,8 +686,6 @@ typedef struct StopBackupCallbackParams strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ strcmp((fname) + XLOG_FNAME_LEN, ".gz") == 0) -#if PG_VERSION_NUM >= 110000 - #define WalSegmentOffset(xlogptr, wal_segsz_bytes) \ XLogSegmentOffset(xlogptr, wal_segsz_bytes) #define GetXLogSegNo(xlrp, logSegNo, wal_segsz_bytes) \ @@ -706,28 +706,6 @@ typedef struct StopBackupCallbackParams #define GetXLogFromFileName(fname, tli, logSegNo, wal_segsz_bytes) \ XLogFromFileName(fname, tli, logSegNo, wal_segsz_bytes) -#else -#define WalSegmentOffset(xlogptr, wal_segsz_bytes) \ - ((xlogptr) & ((XLogSegSize) - 1)) -#define GetXLogSegNo(xlrp, logSegNo, wal_segsz_bytes) \ - XLByteToSeg(xlrp, logSegNo) -#define GetXLogRecPtr(segno, offset, wal_segsz_bytes, dest) \ - XLogSegNoOffsetToRecPtr(segno, offset, dest) -#define GetXLogFileName(fname, tli, logSegNo, wal_segsz_bytes) \ - XLogFileName(fname, tli, logSegNo) -#define IsInXLogSeg(xlrp, logSegNo, wal_segsz_bytes) \ - XLByteInSeg(xlrp, logSegNo) -#define GetXLogSegName(fname, logSegNo, wal_segsz_bytes) \ - snprintf(fname, 20, "%08X%08X",\ - (uint32) ((logSegNo) / XLogSegmentsPerXLogId), \ - (uint32) ((logSegNo) % XLogSegmentsPerXLogId)) - -#define GetXLogSegNoFromScrath(logSegNo, log, seg, wal_segsz_bytes) \ - logSegNo = (uint64) log * 
XLogSegmentsPerXLogId + seg - -#define GetXLogFromFileName(fname, tli, logSegNo, wal_segsz_bytes) \ - XLogFromFileName(fname, tli, logSegNo) -#endif #define IsPartialCompressXLogFileName(fname) \ (strlen(fname) == XLOG_FNAME_LEN + strlen(".gz.partial") && \ diff --git a/src/stream.c b/src/stream.c index 05c60d204..027d70c05 100644 --- a/src/stream.c +++ b/src/stream.c @@ -188,22 +188,8 @@ CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *pl bool is_temporary, bool is_physical, bool slot_exists_ok) { -#if PG_VERSION_NUM >= 150000 - return CreateReplicationSlot(conn, slot_name, plugin, is_temporary, is_physical, - /* reserve_wal = */ true, slot_exists_ok, /* two_phase = */ false); -#elif PG_VERSION_NUM >= 110000 return CreateReplicationSlot(conn, slot_name, plugin, is_temporary, is_physical, /* reserve_wal = */ true, slot_exists_ok); -#else - /* - * PG-10 doesn't support creating temp_slot by calling CreateReplicationSlot(), but - * it will be created by setting StreamCtl.temp_slot later in StreamLog() - */ - if (!is_temporary) - return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok); - else - return true; -#endif } /* @@ -260,26 +246,15 @@ StreamLog(void *arg) ctl.synchronous = false; ctl.mark_done = false; -#if PG_VERSION_NUM >= 150000 ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, - PG_COMPRESSION_NONE, 0, false); -#else /* PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 150000 */ - ctl.walmethod = CreateWalDirectoryMethod( - stream_arg->basedir, - 0, - false); -#endif /* PG_VERSION_NUM >= 150000 */ + ctl.replication_slot = replication_slot; ctl.stop_socket = PGINVALID_SOCKET; ctl.do_sync = false; /* We sync all files at the end of backup */ // ctl.mark_done /* for future use in s3 */ -#if PG_VERSION_NUM < 110000 - /* StreamCtl.temp_slot used only for PG-10, in PG>10, temp_slots are created by calling CreateReplicationSlot() */ - ctl.temp_slot = temp_slot; 
-#endif /* PG_VERSION_NUM < 110000 */ if (ReceiveXlogStream(stream_arg->conn, &ctl) == false) { diff --git a/src/utils/pgut.c b/src/utils/pgut.c index 044b5bf7a..81f1805ac 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -27,6 +27,7 @@ #include "pgut.h" #include "logger.h" #include "file.h" +#include "simple_prompt.h" static char *password = NULL; @@ -81,26 +82,15 @@ prompt_for_password(const char *username) password = NULL; } -#if PG_VERSION_NUM >= 140000 - if (username == NULL) - password = simple_prompt("Password: ", false); - else - { - char message[256]; - snprintf(message, lengthof(message), "Password for user %s: ", username); - password = simple_prompt(message , false); - } -#else password = (char *) pgut_malloc(sizeof(char) * 100 + 1); if (username == NULL) - simple_prompt("Password: ", password, 100, false); + simple_prompt_compat("Password: ", password, 100, false); else { char message[256]; snprintf(message, lengthof(message), "Password for user %s: ", username); - simple_prompt(message, password, 100, false); + simple_prompt_compat(message, password, 100, false); } -#endif in_password = false; } diff --git a/src/utils/simple_prompt.c b/src/utils/simple_prompt.c new file mode 100644 index 000000000..79f49e1b0 --- /dev/null +++ b/src/utils/simple_prompt.c @@ -0,0 +1,159 @@ +#include "c.h" + +#ifdef HAVE_TERMIOS_H +#include +#endif +#include "simple_prompt.h" + +/* + * simple_prompt + * + * Generalized function especially intended for reading in usernames and + * passwords interactively. Reads from /dev/tty or stdin/stderr. + * + * prompt: The prompt to print, or NULL if none (automatically localized) + * destination: buffer in which to store result + * destlen: allocated length of destination + * echo: Set to false if you want to hide what is entered (for passwords) + * + * The input (without trailing newline) is returned in the destination buffer, + * with a '\0' appended. 
+ */ +void +simple_prompt_compat(const char *prompt, char *destination, size_t destlen, bool echo) +{ + int length; + FILE *termin, + *termout; + +#if defined(HAVE_TERMIOS_H) + struct termios t_orig, + t; +#elif defined(WIN32) + HANDLE t = NULL; + DWORD t_orig = 0; +#endif + +#ifdef WIN32 + + /* + * A Windows console has an "input code page" and an "output code page"; + * these usually match each other, but they rarely match the "Windows ANSI + * code page" defined at system boot and expected of "char *" arguments to + * Windows API functions. The Microsoft CRT write() implementation + * automatically converts text between these code pages when writing to a + * console. To identify such file descriptors, it calls GetConsoleMode() + * on the underlying HANDLE, which in turn requires GENERIC_READ access on + * the HANDLE. Opening termout in mode "w+" allows that detection to + * succeed. Otherwise, write() would not recognize the descriptor as a + * console, and non-ASCII characters would display incorrectly. + * + * XXX fgets() still receives text in the console's input code page. This + * makes non-ASCII credentials unportable. + * + * Unintuitively, we also open termin in mode "w+", even though we only + * read it; that's needed for SetConsoleMode() to succeed. + */ + termin = fopen("CONIN$", "w+"); + termout = fopen("CONOUT$", "w+"); +#else + + /* + * Do not try to collapse these into one "w+" mode file. Doesn't work on + * some platforms (eg, HPUX 10.20). + */ + termin = fopen("/dev/tty", "r"); + termout = fopen("/dev/tty", "w"); +#endif + if (!termin || !termout +#ifdef WIN32 + + /* + * Direct console I/O does not work from the MSYS 1.0.10 console. Writes + * reach nowhere user-visible; reads block indefinitely. XXX This affects + * most Windows terminal environments, including rxvt, mintty, Cygwin + * xterm, Cygwin sshd, and PowerShell ISE. Switch to a more-generic test. 
+ */ + || (getenv("OSTYPE") && strcmp(getenv("OSTYPE"), "msys") == 0) +#endif + ) + { + if (termin) + fclose(termin); + if (termout) + fclose(termout); + termin = stdin; + termout = stderr; + } + + if (!echo) + { +#if defined(HAVE_TERMIOS_H) + /* disable echo via tcgetattr/tcsetattr */ + tcgetattr(fileno(termin), &t); + t_orig = t; + t.c_lflag &= ~ECHO; + tcsetattr(fileno(termin), TCSAFLUSH, &t); +#elif defined(WIN32) + /* need the file's HANDLE to turn echo off */ + t = (HANDLE) _get_osfhandle(_fileno(termin)); + + /* save the old configuration first */ + GetConsoleMode(t, &t_orig); + + /* set to the new mode */ + SetConsoleMode(t, ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT); +#endif + } + + if (prompt) + { + fputs(_(prompt), termout); + fflush(termout); + } + + if (fgets(destination, destlen, termin) == NULL) + destination[0] = '\0'; + + length = strlen(destination); + if (length > 0 && destination[length - 1] != '\n') + { + /* eat rest of the line */ + char buf[128]; + int buflen; + + do + { + if (fgets(buf, sizeof(buf), termin) == NULL) + break; + buflen = strlen(buf); + } while (buflen > 0 && buf[buflen - 1] != '\n'); + } + + /* strip trailing newline, including \r in case we're on Windows */ + while (length > 0 && + (destination[length - 1] == '\n' || + destination[length - 1] == '\r')) + destination[--length] = '\0'; + + if (!echo) + { + /* restore previous echo behavior, then echo \n */ +#if defined(HAVE_TERMIOS_H) + tcsetattr(fileno(termin), TCSAFLUSH, &t_orig); + fputs("\n", termout); + fflush(termout); +#elif defined(WIN32) + SetConsoleMode(t, t_orig); + fputs("\n", termout); + fflush(termout); +#endif + } + + if (termin != stdin) + { + fclose(termin); + fclose(termout); + } +} + diff --git a/src/utils/simple_prompt.h b/src/utils/simple_prompt.h new file mode 100644 index 000000000..0b6fc2608 --- /dev/null +++ b/src/utils/simple_prompt.h @@ -0,0 +1,7 @@ +#ifndef SIMPLE_PROMPT_H +#define SIMPLE_PROMPT_H + +extern void +simple_prompt_compat(const char 
*prompt, char *destination, size_t destlen, bool echo); + +#endif From 1cd13553619cc2fd657911239137865375ff59c2 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 30 Nov 2022 08:07:31 +0300 Subject: [PATCH 114/339] configuration.c: rework parse_pair and config_read_opt --- src/fu_util/ft_util.h | 22 +++- src/fu_util/impl/ft_impl.c | 92 ++++++++++++++++ src/fu_util/impl/ft_impl.h | 86 ++++++++------- src/utils/configuration.c | 214 +++++++++++++++++-------------------- 4 files changed, 258 insertions(+), 156 deletions(-) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index 20a03a3fb..9d482f791 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -317,6 +317,10 @@ ft_inline ft_bytes_t ft_bytes(void* ptr, size_t len) { return (ft_bytes_t){.ptr = (char*)ptr, .len = len}; } +ft_inline ft_bytes_t ft_bytesc(const char* ptr) { + return (ft_bytes_t){.ptr = (char*)ptr, .len = strlen(ptr)}; +} + ft_inline ft_bytes_t ft_bytes_alloc(size_t sz) { return ft_bytes(ft_malloc(sz), sz); } @@ -328,12 +332,21 @@ ft_inline void ft_bytes_free(ft_bytes_t* bytes) { ft_inline void ft_bytes_consume(ft_bytes_t *bytes, size_t cut); ft_inline void ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src); +ft_inline ft_bytes_t ft_bytes_split(ft_bytes_t *bytes, size_t n); -ft_inline ft_bytes_t ft_bytes_shift_line(ft_bytes_t *bytes); -ft_inline size_t ft_bytes_find_bytes(ft_bytes_t haystack, ft_bytes_t needle); +extern ft_bytes_t ft_bytes_shift_line(ft_bytes_t *bytes); +extern size_t ft_bytes_find_bytes(ft_bytes_t haystack, ft_bytes_t needle); ft_inline size_t ft_bytes_find_cstr(ft_bytes_t haystack, const char *needle); ft_inline bool ft_bytes_has_cstr(ft_bytes_t haystack, const char *needle); +ft_inline bool ft_bytes_starts_with(ft_bytes_t haystack, ft_bytes_t needle); +ft_inline bool ft_bytes_starts_withc(ft_bytes_t haystack, const char* needle); + +ft_inline size_t ft_bytes_spn(ft_bytes_t bytes, ft_bytes_t chars); +ft_inline size_t ft_bytes_notspn(ft_bytes_t bytes, 
ft_bytes_t chars); +ft_inline size_t ft_bytes_spnc(ft_bytes_t bytes, const char* chars); +ft_inline size_t ft_bytes_notspnc(ft_bytes_t bytes, const char* chars); + // String utils extern size_t ft_strlcpy(char *dest, const char* src, size_t dest_size); /* @@ -458,6 +471,11 @@ extern bool ft_strbuf_vcatf_err (ft_strbuf_t *buf, bool err[1], */ ft_inline ft_str_t ft_strbuf_ref(ft_strbuf_t *buf); +/* + * Reset buffer's len to 0 without deallocation. + */ +ft_inline void ft_strbuf_reset_for_reuse(ft_strbuf_t *buf); + /* * Free buffer's buffer, if it was allocated */ diff --git a/src/fu_util/impl/ft_impl.c b/src/fu_util/impl/ft_impl.c index 15f56467a..ef93f2aed 100644 --- a/src/fu_util/impl/ft_impl.c +++ b/src/fu_util/impl/ft_impl.c @@ -640,3 +640,95 @@ ft_small_cstr_hash(const char *key) { return h2; } +// bytes + +ft_bytes_t +ft_bytes_shift_line(ft_bytes_t *bytes) +{ + size_t i; + char *p = bytes->ptr; + + for (i = 0; i < bytes->len; i++) { + if (p[i] == '\r' || p[i] == '\n') { + if (p[i] == '\r' && i+1 < bytes->len && p[i+1] == '\n') + i++; + ft_bytes_consume(bytes, i+1); + return ft_bytes(p, i+1); + } + } + + ft_bytes_consume(bytes, bytes->len); + return ft_bytes(p, i); +} + + +size_t +ft_bytes_find_bytes(ft_bytes_t haystack, ft_bytes_t needle) +{ + // TODO use memmem if present + size_t i; + char first; + + if (needle.len == 0) + return 0; + if (needle.len > haystack.len) + return haystack.len; + + first = needle.ptr[0]; + for (i = 0; i < haystack.len - needle.len; i++) + { + if (haystack.ptr[i] != first) + continue; + if (memcmp(haystack.ptr + i, needle.ptr, needle.len) == 0) + return i; + } + + return haystack.len; +} + +size_t +ft_bytes_spn_impl(ft_bytes_t bytes, ft_bytes_t chars, bool include) +{ + /* 32*8 = 256 bit */ + uint32_t mask[8] = {0}; + size_t i; + unsigned char c; + + if (chars.len == 0) + return 0; + + if (chars.len == 1 && include) { + c = chars.ptr[0]; + for (i = 0; i < bytes.len; i++) + if (bytes.ptr[i] != c) + return i; + return bytes.len; + } 
else if (chars.len == 1 && !include) { + c = chars.ptr[0]; + for (i = 0; i < bytes.len; i++) + if (bytes.ptr[i] == c) + return i; + return bytes.len; + } + + for (i = 0; i < chars.len; i++) { + c = chars.ptr[i]; + mask[c/32] |= 1 << (c&31); + } + + if (include) { + for (i = 0; i < bytes.len; i++) { + c = bytes.ptr[i]; + if ((mask[c / 32] & (1 << (c & 31))) == 0) + return i; + } + } else { + for (i = 0; i < bytes.len; i++) { + c = bytes.ptr[i]; + if ((mask[c / 32] & (1 << (c & 31))) != 0) + return i; + } + } + + return bytes.len; +} \ No newline at end of file diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index a90b74bdc..ec0bec93f 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -314,10 +314,19 @@ ft__slcindex_unify(ssize_t end, size_t len) { // Bytes +ft_inline ft_bytes_t +ft_bytes_split(ft_bytes_t *bytes, size_t n) { + ft_dbg_assert(n <= bytes->len); + ft_bytes_t head = ft_bytes(bytes->ptr, n); + bytes->ptr += n; + bytes->len -= n; + return head; +} + ft_inline void ft_bytes_consume(ft_bytes_t *bytes, size_t cut) { ft_dbg_assert(cut <= bytes->len); - bytes->ptr = bytes->ptr + cut; + bytes->ptr += cut; bytes->len -= cut; } @@ -329,47 +338,17 @@ ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src) { ft_bytes_consume(src, len); } -ft_inline ft_bytes_t -ft_bytes_shift_line(ft_bytes_t *bytes) +ft_inline bool +ft_bytes_starts_with(ft_bytes_t haystack, ft_bytes_t needle) { - size_t i; - char *p = bytes->ptr; - - for (i = 0; i < bytes->len; i++) { - if (p[i] == '\r' || p[i] == '\n') { - if (p[i] == '\r' && i+1 < bytes->len && p[i+1] == '\n') - i++; - ft_bytes_consume(bytes, i+1); - return ft_bytes(p, i+1); - } - } - - ft_bytes_consume(bytes, bytes->len); - return ft_bytes(p, i); + return haystack.len >= needle.len && + memcmp(haystack.ptr, needle.ptr, needle.len) == 0; } -ft_inline size_t -ft_bytes_find_bytes(ft_bytes_t haystack, ft_bytes_t needle) +ft_inline bool +ft_bytes_starts_withc(ft_bytes_t haystack, const char* 
needle) { - // TODO use memmem if present - size_t i; - char first; - - if (needle.len == 0) - return 0; - if (needle.len > haystack.len) - return haystack.len; - - first = needle.ptr[0]; - for (i = 0; i < haystack.len - needle.len; i++) - { - if (haystack.ptr[i] != first) - continue; - if (memcmp(haystack.ptr + i, needle.ptr, needle.len) == 0) - return i; - } - - return haystack.len; + return ft_bytes_starts_with(haystack, ft_bytesc(needle)); } ft_inline size_t @@ -385,6 +364,32 @@ ft_bytes_has_cstr(ft_bytes_t haystack, const char* needle) return pos != haystack.len; } +extern size_t ft_bytes_spn_impl(ft_bytes_t bytes, ft_bytes_t chars, bool include); + +ft_inline size_t +ft_bytes_spn(ft_bytes_t bytes, ft_bytes_t chars) +{ + return ft_bytes_spn_impl(bytes, chars, true); +} + +ft_inline size_t +ft_bytes_notspn(ft_bytes_t bytes, ft_bytes_t chars) +{ + return ft_bytes_spn_impl(bytes, chars, false); +} + +ft_inline size_t +ft_bytes_spnc(ft_bytes_t bytes, const char* chars) +{ + return ft_bytes_spn(bytes, ft_bytesc(chars)); +} + +ft_inline size_t +ft_bytes_notspnc(ft_bytes_t bytes, const char* chars) +{ + return ft_bytes_notspn(bytes, ft_bytesc(chars)); +} + // String utils ft_inline char * @@ -556,6 +561,11 @@ ft_strbuf_catc(ft_strbuf_t *buf, const char *s) { return ft_strbuf_cat(buf, ft_cstr(s)); } +ft_inline void +ft_strbuf_reset_for_reuse(ft_strbuf_t *buf) { + buf->len = 0; +} + ft_inline void ft_strbuf_free(ft_strbuf_t *buf) { if (buf->alloced) { diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 5622532fc..1682a0402 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -224,6 +224,16 @@ longopts_to_optstring(const struct option opts[], const size_t len) return result; } +static inline char +key_char(char c) +{ + /* '-', '_' and ' ' are equal */ + if (c == '_' || c == ' ') + return '-'; + else + return ToLower(c); +} + /* * Compare two strings ignore cases and ignore. 
*/ @@ -231,15 +241,8 @@ static bool key_equals(const char *lhs, const char *rhs) { for (; *lhs && *rhs; lhs++, rhs++) - { - if (strchr("-_ ", *lhs)) - { - if (!strchr("-_ ", *rhs)) - return false; - } - else if (ToLower(*lhs) != ToLower(*rhs)) + if (key_char(*lhs) != key_char(*rhs)) return false; - } return *lhs == '\0' && *rhs == '\0'; } @@ -356,110 +359,92 @@ assign_option(ConfigOption *opt, const char *optarg, OptionSource src) } } -static const char * -skip_space(const char *str, const char *line) -{ - while (IsSpace(*str)) { str++; } - return str; -} +#define SPACES " \t\n\v\f\r" -static const char * -get_next_token(const char *src, char *dst, const char *line) +static bool +get_next_token(ft_bytes_t *src, ft_strbuf_t *dest) { - const char *s; - int i; - int j; + ft_bytes_t val; - if ((s = skip_space(src, line)) == NULL) - return NULL; + ft_bytes_consume(src, ft_bytes_spnc(*src, SPACES)); /* parse quoted string */ - if (*s == '\'') + if (ft_bytes_starts_withc(*src, "\'")) { - s++; - for (i = 0, j = 0; s[i] != '\0'; i++) + bool seen_quote = false; + + ft_bytes_consume(src, 1); + while (src->len) { - if (s[i] == '\'') + char c = src->ptr[0]; + ft_bytes_consume(src, 1); + /* doubled quote becomes just one quote */ + if (c == '\'' && seen_quote) { - i++; - /* doubled quote becomes just one quote */ - if (s[i] == '\'') - dst[j] = s[i]; - else - break; + ft_strbuf_cat1(dest, '\''); + seen_quote = false; } + else if (c == '\'') + seen_quote = true; + else if (seen_quote) /* previous char was closing quote */ + return true; else - dst[j] = s[i]; - j++; + ft_strbuf_cat1(dest, c); } + /* last char was closing quote */ + return seen_quote; } else { - i = j = strcspn(s, "#\n\r\t\v"); - memcpy(dst, s, j); + val = ft_bytes_split(src, ft_bytes_notspnc(*src, "#"SPACES)); + ft_strbuf_catbytes(dest, val); } - dst[j] = '\0'; - return s + i; + return true; } -static bool -parse_pair(const char buffer[], char key[], char value[]) -{ - const char *start; - const char *end; +enum 
pair_result { + PAIR_OK, + PAIR_EMPTY, + PAIR_ERROR, +}; - key[0] = value[0] = '\0'; +static enum pair_result +parse_pair(ft_bytes_t buffer, ft_strbuf_t *keybuf, ft_strbuf_t *valuebuf) +{ + ft_bytes_t key; /* * parse key */ - start = buffer; - if ((start = skip_space(start, buffer)) == NULL) - return false; - - end = start + strcspn(start, "=# \n\r\t\v"); + ft_bytes_consume(&buffer, ft_bytes_spnc(buffer, SPACES)); + key = ft_bytes_split(&buffer, ft_bytes_notspnc(buffer, "=#"SPACES)); + ft_bytes_consume(&buffer, ft_bytes_spnc(buffer, SPACES)); - /* skip blank buffer */ - if (end - start <= 0) + if (key.len == 0) { - if (*start == '=') - elog(ERROR, "Syntax error in \"%s\"", buffer); - return false; + if (ft_bytes_starts_withc(buffer, "=")) + return PAIR_ERROR; + return PAIR_EMPTY; } - /* key found */ - strncpy(key, start, end - start); - key[end - start] = '\0'; + if (!ft_bytes_starts_withc(buffer, "=")) + return PAIR_ERROR; - /* find key and value split char */ - if ((start = skip_space(end, buffer)) == NULL) - return false; + ft_strbuf_catbytes(keybuf, key); - if (*start != '=') - { - elog(ERROR, "Syntax error in \"%s\"", buffer); - return false; - } + ft_bytes_consume(&buffer, 1); - start++; + /* take value */ + if (!get_next_token(&buffer, valuebuf)) + return PAIR_ERROR; - /* - * parse value - */ - if ((end = get_next_token(start, value, buffer)) == NULL) - return false; + ft_bytes_consume(&buffer, ft_bytes_spnc(buffer, SPACES)); - if ((start = skip_space(end, buffer)) == NULL) - return false; + if (buffer.len != 0 && buffer.ptr[0] != '#') + return PAIR_ERROR; - if (*start != '\0' && *start != '#') - { - elog(ERROR, "Syntax error in \"%s\"", buffer); - return false; - } - - return true; + return PAIR_OK; } /* @@ -544,16 +529,6 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[], return optind; } -static void -ft_bytes_strip_right(ft_bytes_t *line) -{ - size_t i; - - for (i = line->len; i > 0 && IsSpace(line->ptr[i - 1]); i--) - line->ptr[i - 1] = 
'\0'; - line->len = i; -} - /* * Get configuration from configuration file. * Return number of parsed options. @@ -563,9 +538,10 @@ config_read_opt(const char *path, ConfigOption options[], int elevel, bool strict, bool missing_ok) { pioDrive_i local_drive = pioDriveForLocation(FIO_BACKUP_HOST); - char key[1024]; - char value[2048]; + ft_strbuf_t key = ft_strbuf_zero(); + ft_strbuf_t value = ft_strbuf_zero(); int parsed_options = 0; + int lno = 0; err_i err = $noerr(); ft_bytes_t config_file, to_free; @@ -576,52 +552,58 @@ config_read_opt(const char *path, ConfigOption options[], int elevel, .err = &err); if ($haserr(err)) { - ft_bytes_free(&config_file); - if (missing_ok && getErrno(err) == ENOENT) - return parsed_options; + return 0; ft_logerr(FT_FATAL, $errmsg(err), "could not read file"); - return parsed_options; + return 0; } to_free = config_file; - while (true) + while (config_file.len > 0) { size_t i; ft_bytes_t line = ft_bytes_shift_line(&config_file); + enum pair_result pr; - if (line.len == 0) - break; + lno++; + pr = parse_pair(line, &key, &value); + if (pr == PAIR_EMPTY) + continue; - ft_bytes_strip_right(&line); + if (pr == PAIR_ERROR) + elog(ERROR, "Syntax error on %s:%d: %.*s", + path, lno, (int)line.len, line.ptr); - if (parse_pair(line.ptr, key, value)) + for (i = 0; options[i].type; i++) { - for (i = 0; options[i].type; i++) - { - ConfigOption *opt = &options[i]; + ConfigOption *opt = &options[i]; - if (key_equals(key, opt->lname)) + if (key_equals(key.ptr, opt->lname)) + { + if (opt->allowed < SOURCE_FILE && + opt->allowed != SOURCE_FILE_STRICT) + elog(elevel, "Option %s cannot be specified in file", + opt->lname); + else if (opt->source <= SOURCE_FILE) { - if (opt->allowed < SOURCE_FILE && - opt->allowed != SOURCE_FILE_STRICT) - elog(elevel, "Option %s cannot be specified in file", - opt->lname); - else if (opt->source <= SOURCE_FILE) - { - assign_option(opt, value, SOURCE_FILE); - parsed_options++; - } - break; + assign_option(opt, value.ptr, 
SOURCE_FILE); + parsed_options++; } + break; } - if (strict && !options[i].type) - elog(elevel, "Invalid option \"%s\" in file \"%s\"", key, path); } + + if (strict && !options[i].type) + elog(elevel, "Invalid option \"%s\" in file \"%s\"", key.ptr, path); + + ft_strbuf_reset_for_reuse(&key); + ft_strbuf_reset_for_reuse(&value); } ft_bytes_free(&to_free); + ft_strbuf_free(&key); + ft_strbuf_free(&value); return parsed_options; } From 6a684d89b5b2d8018d7047bbbcba1a633163345f Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 30 Nov 2022 08:07:44 +0300 Subject: [PATCH 115/339] ... --- src/fu_util/ft_util.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index 9d482f791..4107bcb4b 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -336,8 +336,8 @@ ft_inline ft_bytes_t ft_bytes_split(ft_bytes_t *bytes, size_t n); extern ft_bytes_t ft_bytes_shift_line(ft_bytes_t *bytes); extern size_t ft_bytes_find_bytes(ft_bytes_t haystack, ft_bytes_t needle); -ft_inline size_t ft_bytes_find_cstr(ft_bytes_t haystack, const char *needle); -ft_inline bool ft_bytes_has_cstr(ft_bytes_t haystack, const char *needle); +ft_inline size_t ft_bytes_find_cstr(ft_bytes_t haystack, const char *needle); +ft_inline bool ft_bytes_has_cstr(ft_bytes_t haystack, const char *needle); ft_inline bool ft_bytes_starts_with(ft_bytes_t haystack, ft_bytes_t needle); ft_inline bool ft_bytes_starts_withc(ft_bytes_t haystack, const char* needle); From d50b0e20b46e075993865b3964dbe8a25e842c2d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 30 Nov 2022 21:46:28 +0300 Subject: [PATCH 116/339] a bit of improvement to pioCopy/pioReadFull and $iresult --- src/fu_util/impl/fo_impl.c | 5 +++++ src/utils/file.c | 35 +++++++++++++++++++---------------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/src/fu_util/impl/fo_impl.c b/src/fu_util/impl/fo_impl.c index 3547c1d7c..52884d27e 100644 --- 
a/src/fu_util/impl/fo_impl.c +++ b/src/fu_util/impl/fo_impl.c @@ -1481,6 +1481,9 @@ static fobj_t fobj_autorelease(fobj_t obj, fobj_autorelease_pool *pool) { fobj_autorelease_chunk *chunk, *new_chunk; + if (obj == NULL) + return NULL; + ft_assert(pool != NULL); chunk = pool->last; @@ -1496,6 +1499,8 @@ fobj_autorelease(fobj_t obj, fobj_autorelease_pool *pool) { fobj_t fobj_store_to_parent_pool(fobj_t obj, fobj_autorelease_pool *child_pool_or_null) { + if (obj == NULL) + return NULL; return fobj_autorelease(obj, (child_pool_or_null ?: *fobj_AR_current_ptr())->ref.parent); } diff --git a/src/utils/file.c b/src/utils/file.c index 7519d2f5d..dba2826c6 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5952,7 +5952,9 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, { FOBJ_FUNC_ARP(); size_t _fallback_copied = 0; - err_i err = $noerr(); + err_i err = $noerr(); + err_i rerr = $noerr(); + err_i werr = $noerr(); void* buf; int i; @@ -5972,38 +5974,37 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, buf = fobj_alloc_temp(OUT_BUF_SIZE); - for (;;) + while (!$haserr(rerr) && !$haserr(werr)) { size_t read_len = 0; size_t write_len = 0; - read_len = $i(pioRead, src, ft_bytes(buf, OUT_BUF_SIZE), &err); - - if ($haserr(err)) - $ireturn(err); + read_len = pioReadFull(src, ft_bytes(buf, OUT_BUF_SIZE), &rerr); if (read_len == 0) break; - write_len = $i(pioWrite, dest, ft_bytes(buf, read_len), &err); - if (write_len != read_len || $haserr(err)) + write_len = $i(pioWrite, dest, ft_bytes(buf, read_len), &werr); + if (write_len != read_len || $haserr(werr)) { - if ($haserr(err)) - $ireturn(err); - - $ireturn($err(SysErr, "Short write to destination file {path}: {writtenSz} < {wantedSz}", + if (!$haserr(werr)) + werr = $err(SysErr, "Short write to destination file {path}: {writtenSz} < {wantedSz}", path($irepr(dest)), - wantedSz(read_len), writtenSz(write_len))); + wantedSz(read_len), writtenSz(write_len)); } *copied += write_len; } + err = fobj_err_combine(rerr, 
werr); + if ($haserr(err)) + return $iresult(err); + /* pioWriteFinish will check for async error if destination was remote */ err = $i(pioWriteFinish, dest); if ($haserr(err)) - $ireturn($err(SysErr, "Cannot flush file {path}: {cause}", - path($irepr(dest)), cause(err.self))); - return $noerr(); + err = $err(SysErr, "Cannot flush file {path}: {cause}", + path($irepr(dest)), cause(err.self)); + return $iresult(err); } size_t @@ -6021,6 +6022,8 @@ pioReadFull(pioRead_i src, ft_bytes_t bytes, err_i* err) ft_bytes_consume(&b, r); if ($haserr(*err)) break; + if (r == 0) + break; } return bytes.len - b.len; } From abacfb23cfc170adae81767f76a9c7c414794280 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 1 Dec 2022 06:45:08 +0300 Subject: [PATCH 117/339] ft_strbuf_t overflowed --- src/fu_util/ft_util.h | 1 + src/fu_util/impl/ft_impl.c | 2 ++ src/fu_util/impl/ft_impl.h | 20 ++++++++++++++------ 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index 4107bcb4b..ab025ac03 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -414,6 +414,7 @@ struct ft_strbuf_t { /* could buffer grow? * It could be set on initialization, of if buffer reaches 4GB limit */ bool fixed; + bool overflowed; /* does ptr points to malloced place? 
*/ /* if so, then ft_strbuf_finish would not strdup */ bool alloced; diff --git a/src/fu_util/impl/ft_impl.c b/src/fu_util/impl/ft_impl.c index ef93f2aed..ba6bb6664 100644 --- a/src/fu_util/impl/ft_impl.c +++ b/src/fu_util/impl/ft_impl.c @@ -189,6 +189,7 @@ ft__strbuf_ensure(ft_strbuf_t *buf, size_t n) { buf->cap = new_cap-1; buf->alloced = true; buf->fixed = overflowed; + buf->overflowed = overflowed; return !overflowed; } @@ -245,6 +246,7 @@ ft_strbuf_vcatf_err(ft_strbuf_t *buf, bool err[1], const char *fmt, va_list args } if (overflowed) { buf->len = buf->cap; + buf->overflowed = true; return false; } } diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index ec0bec93f..12291db39 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -519,11 +519,13 @@ ft_strbuf_catbytes(ft_strbuf_t *buf, ft_bytes_t s) { return true; if (!ft_strbuf_ensure(buf, s.len)) { s.len = buf->cap - buf->len; - ft_assert(s.len > 0); + buf->overflowed = true; + } + if (s.len > 0) { + memmove(buf->ptr + buf->len, s.ptr, s.len); + buf->len += s.len; + buf->ptr[buf->len] = '\0'; } - memmove(buf->ptr + buf->len, s.ptr, s.len); - buf->len += s.len; - buf->ptr[buf->len] = '\0'; return ft_strbuf_may(buf); } @@ -535,6 +537,8 @@ ft_strbuf_cat1(ft_strbuf_t *buf, char c) { buf->ptr[buf->len+0] = c; buf->ptr[buf->len+1] = '\0'; buf->len++; + } else { + buf->overflowed = true; } return ft_strbuf_may(buf); } @@ -548,10 +552,13 @@ ft_strbuf_cat2(ft_strbuf_t *buf, char c1, char c2) { buf->ptr[buf->len+1] = c1; buf->ptr[buf->len+2] = '\0'; buf->len+=2; - } else { + } else if (ft_strbuf_ensure(buf, 1)){ buf->ptr[buf->len+0] = c1; buf->ptr[buf->len+1] = '\0'; buf->len++; + buf->overflowed = true; + } else { + buf->overflowed = true; } return ft_strbuf_may(buf); } @@ -563,7 +570,8 @@ ft_strbuf_catc(ft_strbuf_t *buf, const char *s) { ft_inline void ft_strbuf_reset_for_reuse(ft_strbuf_t *buf) { - buf->len = 0; + buf->len = 0; + buf->overflowed = false; } ft_inline void From 
a9b72d86a01ac49cd37aa63256e7b18f2e02dc3c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 1 Dec 2022 06:54:27 +0300 Subject: [PATCH 118/339] ft_cstrdupn --- src/fu_util/ft_util.h | 2 ++ src/fu_util/impl/ft_impl.h | 18 +++++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index ab025ac03..5fe08678c 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -357,6 +357,7 @@ extern size_t ft_strlcat(char *dest, const char* src, size_t dest_size); /* dup string using ft_malloc */ ft_inline char * ft_cstrdup(const char *str); +ft_inline char * ft_cstrdupn(const char *str, size_t n); /**************** * String @@ -385,6 +386,7 @@ ft_inline ft_bytes_t ft_str2bytes_withzb(ft_str_t str) { ft_inline ft_str_t ft_strdup(ft_str_t str); ft_inline ft_str_t ft_strdupc(const char* str); +ft_inline ft_str_t ft_strdup_bytes(ft_bytes_t bytes); /* use only if string was allocated */ ft_inline void ft_str_free(ft_str_t *str); diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index 12291db39..22a956e28 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -397,15 +397,23 @@ ft_cstrdup(const char *str) { return (char*)ft_strdupc(str).ptr; } +ft_inline char * +ft_cstrdupn(const char *str, size_t n) { + return (char*)ft_strdup_bytes(ft_bytes((char*)str, n)).ptr; +} + ft_inline ft_str_t ft_strdup(ft_str_t str) { + return ft_strdup_bytes(ft_bytes(str.ptr, str.len)); +} + +ft_inline ft_str_t +ft_strdup_bytes(ft_bytes_t str) { char *mem = ft_malloc(str.len + 1); if (str.ptr != NULL) - memcpy(mem, str.ptr, str.len+1); - else - mem[0] = '\0'; - str.ptr = mem; - return str; + memcpy(mem, str.ptr, str.len); + mem[str.len] = '\0'; + return ft_str(mem, str.len); } ft_inline ft_str_t From 44ff1630e2785eedfb136d4bb9ea413fd08f1ce6 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 1 Dec 2022 10:24:21 +0300 Subject: [PATCH 119/339] [PBCKP-349] use pioReadFile in 
read_tablespace_map --- src/dir.c | 51 ++++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/src/dir.c b/src/dir.c index 469967599..180927985 100644 --- a/src/dir.c +++ b/src/dir.c @@ -840,51 +840,52 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba void read_tablespace_map(parray *links, const char *backup_dir) { - FILE *fp; char db_path[MAXPGPATH], map_path[MAXPGPATH]; - char buf[MAXPGPATH * 2]; + pioDrive_i drive; + ft_bytes_t content; + ft_bytes_t parse; + ft_bytes_t line; + err_i err = $noerr(); join_path_components(db_path, backup_dir, DATABASE_DIR); join_path_components(map_path, db_path, PG_TABLESPACE_MAP_FILE); - fp = fio_open_stream(FIO_BACKUP_HOST, map_path); - if (fp == NULL) - elog(ERROR, "Cannot open tablespace map file \"%s\": %s", map_path, strerror(errno)); + drive = pioDriveForLocation(FIO_BACKUP_HOST); - while (fgets(buf, lengthof(buf), fp)) + content = $i(pioReadFile, drive, .path = map_path, .binary = false, + .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Reading tablespace map"); + + parse = content; + + while (parse.len) { - char link_name[MAXPGPATH]; - char *path; - int n = 0; + ft_bytes_t link_name; + ft_bytes_t path; pgFile *file; - int i = 0; - if (sscanf(buf, "%s %n", link_name, &n) != 1) - elog(ERROR, "invalid format found in \"%s\"", map_path); + line = ft_bytes_shift_line(&parse); - path = buf + n; + link_name = ft_bytes_split(&line, ft_bytes_notspnc(line, " ")); + ft_bytes_consume(&line, 1); + path = ft_bytes_split(&line, ft_bytes_notspnc(line, "\n\r")); - /* Remove newline character at the end of string if any */ - i = strcspn(path, "\n"); - if (strlen(path) > i) - path[i] = '\0'; + if (link_name.len == 0 || path.len == 0) + elog(ERROR, "invalid format found in \"%s\"", map_path); - file = pgut_new(pgFile); - memset(file, 0, sizeof(pgFile)); + file = pgut_new0(pgFile); /* follow the convention for pgFileFree */ - 
file->name = pgut_strdup(link_name); - file->linked = pgut_strdup(path); + file->name = ft_strdup_bytes(link_name).ptr; + file->linked = ft_strdup_bytes(path).ptr; canonicalize_path(file->linked); parray_append(links, file); } - if (ferror(fp)) - elog(ERROR, "Failed to read from file: \"%s\"", map_path); - - fio_close_stream(fp); + ft_bytes_free(&content); } /* From d5ef113a8374f5b7d258858101c9e5cdbc66a648 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 1 Dec 2022 11:59:01 +0300 Subject: [PATCH 120/339] [PBCKP-351] add new way to parse config options It is prerequisite for killing fio_open_stream in get_backup_list. --- src/catalog.c | 70 ++++---- src/dir.c | 320 ++++++++++++++++++++----------------- src/fu_util/ft_array.inc.h | 7 + src/pg_probackup.h | 17 +- src/utils/configuration.c | 2 - src/utils/pgut.h | 2 + 6 files changed, 237 insertions(+), 181 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index b197ca482..c9ef77889 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1019,6 +1019,7 @@ get_backup_filelist(pgBackup *backup, bool strict) char buf[BLCKSZ]; char stdio_buf[STDIO_BUFSIZE]; pg_crc32 content_crc = 0; + pb_control_line pb_line; join_path_components(backup_filelist_path, backup->root_dir, DATABASE_FILE_LIST); @@ -1034,12 +1035,14 @@ get_backup_filelist(pgBackup *backup, bool strict) INIT_CRC32C(content_crc); + init_pb_control_line(&pb_line); + while (fgets(buf, lengthof(buf), fp)) { - char path[MAXPGPATH]; - char linked[MAXPGPATH]; - char compress_alg_string[MAXPGPATH]; - char kind[16]; + ft_str_t path; + ft_str_t linked; + ft_str_t compress_alg; + ft_str_t kind; int64 write_size, uncompressed_size, mode, /* bit length of mode_t depends on platforms */ @@ -1058,59 +1061,66 @@ get_backup_filelist(pgBackup *backup, bool strict) COMP_CRC32C(content_crc, buf, strlen(buf)); - get_control_value_str(buf, "path", path, sizeof(path),true); - get_control_value_int64(buf, "size", &write_size, true); - get_control_value_int64(buf, "mode", &mode, 
true); - get_control_value_int64(buf, "is_datafile", &is_datafile, true); - get_control_value_int64(buf, "is_cfs", &is_cfs, false); - get_control_value_int64(buf, "crc", &crc, true); - get_control_value_str(buf, "compress_alg", compress_alg_string, sizeof(compress_alg_string), false); - get_control_value_int64(buf, "external_dir_num", &external_dir_num, false); - get_control_value_int64(buf, "dbOid", &dbOid, false); - - file = pgFileInit(path); + parse_pb_control_line(&pb_line, ft_str2bytes(ft_cstr(buf))); + + path = pb_control_line_get_str(&pb_line, "path"); + write_size = pb_control_line_get_int64(&pb_line, "size"); + mode = pb_control_line_get_int64(&pb_line, "mode"); + is_datafile = pb_control_line_get_int64(&pb_line, "is_datafile"); + crc = pb_control_line_get_int64(&pb_line, "crc"); + + pb_control_line_try_int64(&pb_line, "is_cfs", &is_cfs); + pb_control_line_try_int64(&pb_line, "dbOid", &dbOid); + pb_control_line_try_str(&pb_line, "compress_alg", &compress_alg); + pb_control_line_try_int64(&pb_line, "external_dir_num", &external_dir_num); + + if (path.len > MAXPGPATH) + elog(ERROR, "File path in "DATABASE_FILE_LIST" is too long: '%s'", + buf); + + file = pgFileInit(path.ptr); file->write_size = (int64) write_size; file->mode = (mode_t) mode; file->is_datafile = is_datafile ? true : false; file->is_cfs = is_cfs ? true : false; file->crc = (pg_crc32) crc; - file->compress_alg = parse_compress_alg(compress_alg_string); - file->external_dir_num = external_dir_num; + file->compress_alg = parse_compress_alg(compress_alg.ptr); + file->external_dir_num = (int)external_dir_num; file->dbOid = dbOid ? 
dbOid : 0; /* * Optional fields */ - if (get_control_value_str(buf, "kind", kind, sizeof(kind), false)) - file->kind = pio_str2file_kind(kind, path); + if (pb_control_line_try_str(&pb_line, "kind", &kind)) + file->kind = pio_str2file_kind(kind.ptr, path.ptr); else /* fallback to mode for old backups */ - file->kind = pio_statmode2file_kind(file->mode, path); + file->kind = pio_statmode2file_kind(file->mode, path.ptr); - if (get_control_value_str(buf, "linked", linked, sizeof(linked), false) && linked[0]) + if (pb_control_line_try_str(&pb_line, "linked", &linked) && linked.len > 0) { - file->linked = pgut_strdup(linked); + file->linked = ft_strdup(linked).ptr; canonicalize_path(file->linked); } - if (get_control_value_int64(buf, "segno", &segno, false)) + if (pb_control_line_try_int64(&pb_line, "segno", &segno)) file->segno = (int) segno; - if (get_control_value_int64(buf, "n_blocks", &n_blocks, false)) + if (pb_control_line_try_int64(&pb_line, "n_blocks", &n_blocks)) file->n_blocks = (int) n_blocks; - if (get_control_value_int64(buf, "n_headers", &n_headers, false)) + if (pb_control_line_try_int64(&pb_line, "n_headers", &n_headers)) file->n_headers = (int) n_headers; - if (get_control_value_int64(buf, "hdr_crc", &hdr_crc, false)) + if (pb_control_line_try_int64(&pb_line, "hdr_crc", &hdr_crc)) file->hdr_crc = (pg_crc32) hdr_crc; - if (get_control_value_int64(buf, "hdr_off", &hdr_off, false)) + if (pb_control_line_try_int64(&pb_line, "hdr_off", &hdr_off)) file->hdr_off = hdr_off; - if (get_control_value_int64(buf, "hdr_size", &hdr_size, false)) + if (pb_control_line_try_int64(&pb_line, "hdr_size", &hdr_size)) file->hdr_size = (int) hdr_size; - if (get_control_value_int64(buf, "full_size", &uncompressed_size, false)) + if (pb_control_line_try_int64(&pb_line, "full_size", &uncompressed_size)) file->uncompressed_size = uncompressed_size; else file->uncompressed_size = write_size; @@ -1138,6 +1148,8 @@ get_backup_filelist(pgBackup *backup, bool strict) 
parray_append(files, file); } + deinit_pb_control_line(&pb_line); + FIN_CRC32C(content_crc); if (ferror(fp)) diff --git a/src/dir.c b/src/dir.c index 180927985..48e85ba8c 100644 --- a/src/dir.c +++ b/src/dir.c @@ -124,7 +124,7 @@ static void opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, const char *type); static void cleanup_tablespace(const char *path); -static void control_string_bad_format(const char* str); +static void control_string_bad_format(ft_bytes_t str); static bool exclude_files_cb(void *value, void *exclude_args); @@ -1135,160 +1135,183 @@ get_external_remap(char *current_dir) return current_dir; } -/* Parsing states for get_control_value_str() */ -#define CONTROL_WAIT_NAME 1 -#define CONTROL_INNAME 2 -#define CONTROL_WAIT_COLON 3 -#define CONTROL_WAIT_VALUE 4 -#define CONTROL_INVALUE 5 -#define CONTROL_WAIT_NEXT_NAME 6 - /* - * Get value from json-like line "str" of backup_content.control file. + * Parse values from json-like line "str" of backup_content.control file. * * The line has the following format: * {"name1":"value1", "name2":"value2"} - * - * The value will be returned in "value_int64" as int64. - * - * Returns true if the value was found in the line and parsed. 
*/ -bool -get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory) -{ - char buf_int64[32]; +typedef struct pb_control_line_kv { + uint32_t key_hash; + uint32_t key_start; + uint32_t key_len; + uint32_t val_len; +} pb_control_line_kv; + +#define FT_SLICE clkv +#define FT_SLICE_TYPE pb_control_line_kv +#include "ft_array.inc.h" - assert(value_int64); +void +init_pb_control_line(pb_control_line* pb_line) +{ + pb_line->kvs = ft_malloc(sizeof(ft_arr_clkv_t)); + *pb_line->kvs = (ft_arr_clkv_t)ft_arr_init(); + pb_line->strbuf = ft_strbuf_zero(); +} - /* Set default value */ - *value_int64 = 0; +void +parse_pb_control_line(pb_control_line* pb_line, ft_bytes_t line) +{ + pb_control_line_kv kv = {0}; + ft_strbuf_t *strbuf = &pb_line->strbuf; + ft_bytes_t parse; - if (!get_control_value_str(str, name, buf_int64, sizeof(buf_int64), is_mandatory)) - return false; + pb_line->line = line; + ft_arr_clkv_reset_for_reuse(pb_line->kvs); + ft_strbuf_reset_for_reuse(&pb_line->strbuf); - if (!parse_int64(buf_int64, value_int64, 0)) + parse = line; + ft_bytes_consume(&parse, ft_bytes_spnc(parse, "{ \t")); + while (parse.len) { - /* We assume that too big value is -1 */ - if (errno == ERANGE) - *value_int64 = BYTES_INVALID; - else - control_string_bad_format(str); - return false; + ft_bytes_t name; + ft_bytes_t value; + + ft_bytes_consume(&parse, ft_bytes_spnc(parse, SPACES)); + /* name in quotes */ + if (!ft_bytes_starts_withc(parse, "\"")) + control_string_bad_format(line); + ft_bytes_consume(&parse, 1); /* skip quote */ + + name = ft_bytes_split(&parse, ft_bytes_notspnc(parse, "\"")); + if (!ft_bytes_starts_withc(parse, "\"")) + control_string_bad_format(line); + kv.key_start = strbuf->len; + kv.key_len = name.len; + ft_strbuf_catbytes(strbuf, name); + ft_strbuf_cat1(strbuf, '\0'); + kv.key_hash = ft_small_cstr_hash(strbuf->ptr + kv.key_start); + + ft_bytes_consume(&parse, 1); /* skip quote */ + ft_bytes_consume(&parse, ft_bytes_spnc(parse, 
SPACES)); + if (!ft_bytes_starts_withc(parse, ":")) + control_string_bad_format(line); + ft_bytes_consume(&parse, 1); /* skip colon */ + ft_bytes_consume(&parse, ft_bytes_spnc(parse, SPACES)); + + /* value in quotes */ + if (!ft_bytes_starts_withc(parse, "\"")) + control_string_bad_format(line); + ft_bytes_consume(&parse, 1); /* skip quote */ + + value = ft_bytes_split(&parse, ft_bytes_notspnc(parse, "\"")); + if (!ft_bytes_starts_withc(parse, "\"")) + control_string_bad_format(line); + kv.val_len = value.len; + ft_strbuf_catbytes(strbuf, value); + ft_strbuf_cat1(strbuf, '\0'); + ft_arr_clkv_push(pb_line->kvs, kv); + + ft_bytes_consume(&parse, 1); /* skip quote */ + ft_bytes_consume(&parse, ft_bytes_spnc(parse, SPACES)); + if (ft_bytes_starts_withc(parse, ",")) + { + ft_bytes_consume(&parse, 1); + continue; + } + break; } - return true; + if (!ft_bytes_starts_withc(parse, "}")) + control_string_bad_format(line); + ft_bytes_consume(&parse, 1); + ft_bytes_consume(&parse, ft_bytes_spnc(parse, SPACES)); + if (parse.len != 0) + control_string_bad_format(line); } -/* - * Get value from json-like line "str" of backup_content.control file. - * - * The line has the following format: - * {"name1":"value1", "name2":"value2"} - * - * The value will be returned to "value_str" as string. - * - * Returns true if the value was found in the line. 
- */ +void +deinit_pb_control_line(pb_control_line *pb_line) +{ + ft_arr_clkv_free(pb_line->kvs); + ft_free(pb_line->kvs); + pb_line->kvs = NULL; + ft_strbuf_free(&pb_line->strbuf); +} bool -get_control_value_str(const char *str, const char *name, - char *value_str, size_t value_str_size, bool is_mandatory) +pb_control_line_try_str(pb_control_line *pb_line, const char *name, ft_str_t *value) { - int state = CONTROL_WAIT_NAME; - char *name_ptr = (char *) name; - char *buf = (char *) str; - char *const value_str_start = value_str; + pb_control_line_kv kv; + ft_str_t key; + uint32_t i; + uint32_t key_hash = ft_small_cstr_hash(name); - assert(value_str); - assert(value_str_size > 0); + for (i = 0; i < pb_line->kvs->len; i++) + { + kv = ft_arr_clkv_at(pb_line->kvs, i); + if (kv.key_hash != key_hash) + continue; + key = ft_str(pb_line->strbuf.ptr + kv.key_start, kv.key_len); + if (!ft_streqc(key, name)) + continue; + *value = ft_str(pb_line->strbuf.ptr + kv.key_start + kv.key_len + 1, + kv.val_len); + return true; + } + *value = ft_str("", 0); + return false; +} - /* Set default value */ - *value_str = '\0'; +bool +pb_control_line_try_int64(pb_control_line *pb_line, const char *name, int64 *value) +{ + ft_str_t val; - while (*buf) - { - switch (state) - { - case CONTROL_WAIT_NAME: - if (*buf == '"') - state = CONTROL_INNAME; - else if (IsAlpha(*buf)) - control_string_bad_format(str); - break; - case CONTROL_INNAME: - /* Found target field. Parse value. 
*/ - if (*buf == '"') - state = CONTROL_WAIT_COLON; - /* Check next field */ - else if (*buf != *name_ptr) - { - name_ptr = (char *) name; - state = CONTROL_WAIT_NEXT_NAME; - } - else - name_ptr++; - break; - case CONTROL_WAIT_COLON: - if (*buf == ':') - state = CONTROL_WAIT_VALUE; - else if (!IsSpace(*buf)) - control_string_bad_format(str); - break; - case CONTROL_WAIT_VALUE: - if (*buf == '"') - { - state = CONTROL_INVALUE; - } - else if (IsAlpha(*buf)) - control_string_bad_format(str); - break; - case CONTROL_INVALUE: - /* Value was parsed, exit */ - if (*buf == '"') - { - *value_str = '\0'; - return true; - } - else - { - /* verify if value_str not exceeds value_str_size limits */ - if (value_str - value_str_start >= value_str_size - 1) { - elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s", - name, str, DATABASE_FILE_LIST); - } - *value_str = *buf; - value_str++; - } - break; - case CONTROL_WAIT_NEXT_NAME: - if (*buf == ',') - state = CONTROL_WAIT_NAME; - break; - default: - /* Should not happen */ - break; - } + *value = 0; + if (!pb_control_line_try_str(pb_line, name, &val)) + return false; - buf++; + if (!parse_int64(val.ptr, value, 0)) + { + /* We assume that too big value is -1 */ + if (errno == ERANGE) + *value = BYTES_INVALID; + else + control_string_bad_format(pb_line->line); + return false; } - /* There is no close quotes */ - if (state == CONTROL_INNAME || state == CONTROL_INVALUE) - control_string_bad_format(str); + return true; +} - /* Did not find target field */ - if (is_mandatory) - elog(ERROR, "field \"%s\" is not found in the line %s of the file %s", - name, str, DATABASE_FILE_LIST); - return false; +ft_str_t +pb_control_line_get_str(pb_control_line *pb_line, const char *name) +{ + ft_str_t res; + if (!pb_control_line_try_str(pb_line, name, &res)) + elog(ERROR, "field \"%s\" is not found in the line %.*s of the file %s", + name, (int)pb_line->line.len, pb_line->line.ptr, DATABASE_FILE_LIST); + return res; +} + +int64_t 
+pb_control_line_get_int64(pb_control_line *pb_line, const char *name) +{ + int64_t res; + if (!pb_control_line_try_int64(pb_line, name, &res)) + elog(ERROR, "field \"%s\" is not found in the line %.*s of the file %s", + name, (int)pb_line->line.len, pb_line->line.ptr, DATABASE_FILE_LIST); + return res; } static void -control_string_bad_format(const char* str) +control_string_bad_format(ft_bytes_t str) { - elog(ERROR, "%s file has invalid format in line %s", - DATABASE_FILE_LIST, str); + elog(ERROR, "%s file has invalid format in line %.*s", + DATABASE_FILE_LIST, (int)str.len, str.ptr); } /* @@ -1481,47 +1504,50 @@ write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_ parray * read_database_map(pgBackup *backup) { - FILE *fp; parray *database_map; - char buf[MAXPGPATH]; char path[MAXPGPATH]; char database_map_path[MAXPGPATH]; + pioDrive_i drive; + err_i err = $noerr(); + ft_bytes_t content; + ft_bytes_t parse; + ft_bytes_t line; + pb_control_line pb_line; join_path_components(path, backup->root_dir, DATABASE_DIR); join_path_components(database_map_path, path, DATABASE_MAP); - fp = fio_open_stream(FIO_BACKUP_HOST, database_map_path); - if (fp == NULL) - { - /* It is NOT ok for database_map to be missing at this point, so - * we should error here. - * It`s a job of the caller to error if database_map is not empty. 
- */ - elog(ERROR, "Cannot open \"%s\": %s", database_map_path, strerror(errno)); - } + drive = pioDriveForLocation(FIO_BACKUP_HOST); + + content = $i(pioReadFile, drive, .path = database_map_path, .binary = false, + .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Reading database_map"); database_map = parray_new(); - while (fgets(buf, lengthof(buf), fp)) + init_pb_control_line(&pb_line); + + parse = content; + while (parse.len > 0) { - char datname[MAXPGPATH]; + ft_str_t datname; int64 dbOid; + db_map_entry *db_entry = pgut_new0(db_map_entry); - db_map_entry *db_entry = (db_map_entry *) pgut_malloc(sizeof(db_map_entry)); + line = ft_bytes_shift_line(&parse); + parse_pb_control_line(&pb_line, line); - get_control_value_int64(buf, "dbOid", &dbOid, true); - get_control_value_str(buf, "datname", datname, sizeof(datname), true); + dbOid = pb_control_line_get_int64(&pb_line, "dbOid"); + datname = pb_control_line_get_str(&pb_line, "datname"); db_entry->dbOid = dbOid; - db_entry->datname = pgut_strdup(datname); + db_entry->datname = ft_strdup(datname).ptr; parray_append(database_map, db_entry); } - if (ferror(fp)) - elog(ERROR, "Failed to read from file: \"%s\"", database_map_path); - - fio_close_stream(fp); + deinit_pb_control_line(&pb_line); /* Return NULL if file is empty */ if (parray_num(database_map) == 0) diff --git a/src/fu_util/ft_array.inc.h b/src/fu_util/ft_array.inc.h index 847a6393d..aaeeb2471 100644 --- a/src/fu_util/ft_array.inc.h +++ b/src/fu_util/ft_array.inc.h @@ -163,6 +163,7 @@ #define ft_array_ensure fm_cat(ft_array_pref, _ensure) #define ft_array_recapa fm_cat(ft_array_pref, _recapa) #define ft_array_resize fm_cat(ft_array_pref, _resize) +#define ft_array_reset_for_reuse fm_cat(ft_array_pref, _reset_for_reuse) #define ft_array_free fm_cat(ft_array_pref, _free) #define ft_array_insert_at fm_cat(ft_array_pref, _insert_at) @@ -373,6 +374,11 @@ ft_array_resize(ft_array_type *arr, size_t len) { ft_array_recapa(arr, arr->len); } 
+ft_inline void +ft_array_reset_for_reuse(ft_array_type *arr) { + arr->len = 0; +} + ft_inline ft_array_type ft_array_alloc(FT_SLICE_TYPE *ptr, size_t len) { ft_array_type arr = {NULL, 0, 0}; @@ -576,6 +582,7 @@ ft_array_walk(ft_array_type *arr, FT_WALK_ACT (*walk)(FT_SLICE_TYPE *el)) { #undef ft_array_ensure #undef ft_array_recapa #undef ft_array_resize +#undef ft_array_reset_for_reuse #undef ft_array_free #undef ft_array_insert_at diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 1b24d96c9..d32cb0068 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -970,9 +970,20 @@ extern CompressAlg parse_compress_alg(const char *arg); extern const char* deparse_compress_alg(int alg); /* in dir.c */ -extern bool get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory); -extern bool get_control_value_str(const char *str, const char *name, - char *value_str, size_t value_str_size, bool is_mandatory); +typedef struct ft_arr_clkv_t ft_arr_clkv_t; +typedef struct pb_control_line pb_control_line; +struct pb_control_line { + ft_bytes_t line; + ft_arr_clkv_t *kvs; + ft_strbuf_t strbuf; +}; +extern void init_pb_control_line(pb_control_line *pb_line); +extern void parse_pb_control_line(pb_control_line *pb_line, ft_bytes_t line); +extern void deinit_pb_control_line(pb_control_line *pb_line); +extern int64_t pb_control_line_get_int64(pb_control_line *pb_line, const char *name); +extern ft_str_t pb_control_line_get_str(pb_control_line *pb_line, const char *name); +extern bool pb_control_line_try_int64(pb_control_line *pb_line, const char *name, int64 *value); +extern bool pb_control_line_try_str(pb_control_line *pb_line, const char *name, ft_str_t *value); extern const char *get_tablespace_mapping(const char *dir); extern void create_data_directories(parray *dest_files, diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 1682a0402..4b8f31d49 100644 --- a/src/utils/configuration.c +++ 
b/src/utils/configuration.c @@ -359,8 +359,6 @@ assign_option(ConfigOption *opt, const char *optarg, OptionSource src) } } -#define SPACES " \t\n\v\f\r" - static bool get_next_token(ft_bytes_t *src, ft_strbuf_t *dest) { diff --git a/src/utils/pgut.h b/src/utils/pgut.h index ea803fd25..63dbef4b0 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -88,6 +88,8 @@ extern void pgut_free(void *p); #define AssertMacro(x) ((void) 0) #endif +#define SPACES " \t\n\v\f\r" + #define IsSpace(c) (isspace((unsigned char)(c))) #define IsAlpha(c) (isalpha((unsigned char)(c))) #define IsAlnum(c) (isalnum((unsigned char)(c))) From fe75cdbf93c2eb18e30295646ed5dc32ba22bd9a Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Tue, 29 Nov 2022 19:36:07 +0300 Subject: [PATCH 121/339] PBCKP-367 pio is inside of config_read_opt. fio_access is replaced with missing_ok=false --- src/catalog.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index c9ef77889..b38380ea8 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2664,14 +2664,8 @@ readBackupControlFile(const char *path) }; pgBackupInit(backup); - if (fio_access(FIO_BACKUP_HOST, path, F_OK) != 0) - { - elog(WARNING, "Control file \"%s\" doesn't exist", path); - pgBackupFree(backup); - return NULL; - } - parsed_options = config_read_opt(path, options, WARNING, true, true); + parsed_options = config_read_opt(path, options, WARNING, true, false); if (parsed_options == 0) { From b288fec839a8cff8b9e02deed75188553f94cf45 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Wed, 30 Nov 2022 00:23:23 +0500 Subject: [PATCH 122/339] [PBCKP-372]: Rewriting write_backup_filelist using pio and ft_strbuf_t --- src/catalog.c | 88 +++++++++++++++++++++++++++------------------------ 1 file changed, 47 insertions(+), 41 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index b38380ea8..5f6383077 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2466,6 +2466,7 @@ write_backup(pgBackup *backup, 
bool strict) ft_logerr(FT_FATAL, $errmsg(err), "Renaming " BACKUP_CONTROL_FILE); } + /* * Output the list of files to backup catalog DATABASE_FILE_LIST */ @@ -2473,40 +2474,36 @@ void write_backup_filelist(pgBackup *backup, parray *files, const char *root, parray *external_list, bool sync) { - FILE *out; char control_path[MAXPGPATH]; char control_path_temp[MAXPGPATH]; size_t i = 0; - #define BUFFERSZ (1024*1024) - char *buf; int64 backup_size_on_disk = 0; int64 uncompressed_size_on_disk = 0; int64 wal_size_on_disk = 0; + pioFile_i out; + pioDrive_i backup_drive = backup->backup_location; + err_i err; + + ft_strbuf_t line = ft_strbuf_zero(); + join_path_components(control_path, backup->root_dir, DATABASE_FILE_LIST); snprintf(control_path_temp, sizeof(control_path_temp), "%s.tmp", control_path); - out = fopen(control_path_temp, PG_BINARY_W); - if (out == NULL) + out = $i(pioOpen, backup_drive, control_path_temp, + .flags = O_WRONLY | O_CREAT | O_EXCL | PG_BINARY, + .err = &err); + if ($haserr(err)) elog(ERROR, "Cannot open file list \"%s\": %s", control_path_temp, strerror(errno)); - if (chmod(control_path_temp, FILE_PERMISSION) == -1) - elog(ERROR, "Cannot change mode of \"%s\": %s", control_path_temp, - strerror(errno)); - - buf = pgut_malloc(BUFFERSZ); - setvbuf(out, buf, _IOFBF, BUFFERSZ); - if (sync) INIT_CRC32C(backup->content_crc); /* print each file in the list */ for (i = 0; i < parray_num(files); i++) { - int len = 0; - char line[BLCKSZ]; - pgFile *file = (pgFile *) parray_get(files, i); + pgFile *file = (pgFile *) parray_get(files, i); /* Ignore disappeared file */ if (file->write_size == FILE_NOT_FOUND) @@ -2534,7 +2531,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, } } - len = sprintf(line, "{\"path\":\"%s\", \"size\":\"" INT64_FORMAT "\", " + ft_strbuf_catf(&line,"{\"path\":\"%s\", \"size\":\"" INT64_FORMAT "\", " "\"kind\":\"%s\", \"mode\":\"%u\", \"is_datafile\":\"%u\", " "\"is_cfs\":\"%u\", \"crc\":\"%u\", " 
"\"compress_alg\":\"%s\", \"external_dir_num\":\"%d\", " @@ -2551,52 +2548,63 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, if (file->uncompressed_size != 0 && file->uncompressed_size != file->write_size) - len += sprintf(line+len, ",\"full_size\":\"" INT64_FORMAT "\"", + ft_strbuf_catf(&line, ",\"full_size\":\"" INT64_FORMAT "\"", file->uncompressed_size); if (file->is_datafile) - len += sprintf(line+len, ",\"segno\":\"%d\"", file->segno); + ft_strbuf_catf(&line, ",\"segno\":\"%d\"", file->segno); if (file->linked) - len += sprintf(line+len, ",\"linked\":\"%s\"", file->linked); + ft_strbuf_catf(&line, ",\"linked\":\"%s\"", file->linked); if (file->n_blocks > 0) - len += sprintf(line+len, ",\"n_blocks\":\"%i\"", file->n_blocks); + ft_strbuf_catf(&line, ",\"n_blocks\":\"%i\"", file->n_blocks); if (file->n_headers > 0) { - len += sprintf(line+len, ",\"n_headers\":\"%i\"", file->n_headers); - len += sprintf(line+len, ",\"hdr_crc\":\"%u\"", file->hdr_crc); - len += sprintf(line+len, ",\"hdr_off\":\"%llu\"", file->hdr_off); - len += sprintf(line+len, ",\"hdr_size\":\"%i\"", file->hdr_size); + ft_strbuf_catf(&line, ",\"n_headers\":\"%i\"", file->n_headers); + ft_strbuf_catf(&line, ",\"hdr_crc\":\"%u\"", file->hdr_crc); + ft_strbuf_catf(&line, ",\"hdr_off\":\"%llu\"", file->hdr_off); + ft_strbuf_catf(&line, ",\"hdr_size\":\"%i\"", file->hdr_size); } - sprintf(line+len, "}\n"); + ft_strbuf_catf(&line, "}\n"); if (sync) - COMP_CRC32C(backup->content_crc, line, strlen(line)); + COMP_CRC32C(backup->content_crc, (char*)line.ptr, line.len); + + $i(pioWrite, out, ft_bytes(line.ptr, line.len), &err); + + ft_strbuf_reset_for_reuse(&line); - fprintf(out, "%s", line); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Writing into " DATABASE_FILE_LIST ".tmp"); } + ft_strbuf_free(&line); + if (sync) FIN_CRC32C(backup->content_crc); - if (fflush(out) != 0) - elog(ERROR, "Cannot flush file list \"%s\": %s", - control_path_temp, strerror(errno)); + 
err = $i(pioWriteFinish, out); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Flushing " DATABASE_FILE_LIST ".tmp"); - if (sync && fsync(fileno(out)) < 0) - elog(ERROR, "Cannot sync file list \"%s\": %s", - control_path_temp, strerror(errno)); + /* if (sync) + { + err = pioSync(out); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Sync " DATABASE_FILE_LIST ".tmp"); + } */ - if (fclose(out) != 0) - elog(ERROR, "Cannot close file list \"%s\": %s", - control_path_temp, strerror(errno)); + err = $i(pioClose, out, .sync=true); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Closing " DATABASE_FILE_LIST ".tmp"); - if (rename(control_path_temp, control_path) < 0) - elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", - control_path_temp, control_path, strerror(errno)); + err = $i(pioRename, backup->backup_location, + .old_path = control_path_temp, .new_path = control_path); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Renaming " DATABASE_FILE_LIST ".tmp"); /* use extra variable to avoid reset of previous data_bytes value in case of error */ backup->data_bytes = backup_size_on_disk; @@ -2604,8 +2612,6 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, if (backup->stream) backup->wal_bytes = wal_size_on_disk; - - free(buf); } /* From fb8441a9b09b781a604a8fae95a773e2c4ab24e8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 2 Dec 2022 09:42:35 +0300 Subject: [PATCH 123/339] externalize couple of functions --- src/backup.c | 6 +----- src/pg_probackup.h | 3 +++ 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/backup.c b/src/backup.c index 44f1905bd..9b778285f 100644 --- a/src/backup.c +++ b/src/backup.c @@ -37,8 +37,6 @@ bool backup_in_progress = false; /* * Backup routines */ -static void backup_cleanup(bool fatal, void *userdata); - static void *backup_files(void *arg); static void do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, @@ -47,8 +45,6 @@ static void 
do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, static void pg_switch_wal(PGconn *conn); -static void check_external_for_tablespaces(parray *external_list, - PGconn *backup_conn); static parray *get_database_map(PGconn *pg_startbackup_conn); /* pgpro specific functions */ @@ -1926,7 +1922,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb * of the DB cluster. * Also update backup status to ERROR when the backup is not finished. */ -static void +void backup_cleanup(bool fatal, void *userdata) { /* diff --git a/src/pg_probackup.h b/src/pg_probackup.h index d32cb0068..5ed0d7b08 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -797,6 +797,9 @@ extern BackupMode parse_backup_mode(const char *value); extern const char *deparse_backup_mode(BackupMode mode); extern void process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno); +extern void check_external_for_tablespaces(parray *external_list, + PGconn *backup_conn); +extern void backup_cleanup(bool fatal, void *userdata); /* in catchup.c */ extern int do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, bool sync_dest_files, From b239823c2929f7a4725fb64bc05bfc24cfc1567b Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 2 Dec 2022 11:42:43 +0300 Subject: [PATCH 124/339] [PBCKP-374] pioLocalDrive_pioWriteFile - use temporary file and rename to reduce calls to fio_rename/pioRename plus remove forgotten fio_open --- src/catalog.c | 11 +---- src/restore.c | 17 +------ src/utils/file.c | 112 +++++++++++++++++++++++++++++++++++------------ 3 files changed, 87 insertions(+), 53 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 5f6383077..ab1799ea7 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2441,29 +2441,20 @@ pgBackupWriteControl(pgBackup *backup, bool utc) void write_backup(pgBackup *backup, bool strict) { - FOBJ_FUNC_ARP(); - ft_str_t buf; - char path_temp[MAXPGPATH]; char path[MAXPGPATH]; err_i 
err = $noerr(); join_path_components(path, backup->root_dir, BACKUP_CONTROL_FILE); - snprintf(path_temp, sizeof(path_temp), "%s.tmp", path); buf = pgBackupWriteControl(backup, true); - err = $i(pioWriteFile, backup->backup_location, .path = path_temp, + err = $i(pioWriteFile, backup->backup_location, .path = path, .content = ft_bytes(buf.ptr, buf.len), .binary = false); ft_str_free(&buf); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Writting " BACKUP_CONTROL_FILE ".tmp"); - - err = $i(pioRename, backup->backup_location, - .old_path = path_temp, .new_path = path); - if ($haserr(err)) - ft_logerr(FT_FATAL, $errmsg(err), "Renaming " BACKUP_CONTROL_FILE); } diff --git a/src/restore.c b/src/restore.c index 31d2cfba5..e5330c947 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1536,9 +1536,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, { char postgres_auto_path[MAXPGPATH]; - char postgres_auto_path_tmp[MAXPGPATH]; char path[MAXPGPATH]; - FILE *fp_tmp = NULL; char current_time_str[100]; /* postgresql.auto.conf parsing */ ft_bytes_t old_content; @@ -1565,11 +1563,6 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, ft_logerr(FT_FATAL, $errmsg(err), ""); } - sprintf(postgres_auto_path_tmp, "%s.tmp", postgres_auto_path); - fp_tmp = fio_fopen(FIO_DB_HOST, postgres_auto_path_tmp, "w"); - if (fp_tmp == NULL) - elog(ERROR, "cannot open \"%s\": %s", postgres_auto_path_tmp, strerror(errno)); - parse = old_content; /* copy since ft_bytes_shift_line mutates bytes */ while (parse.len > 0) @@ -1642,19 +1635,11 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, /* Write data to postgresql.auto.conf.tmp */ err = $i(pioWriteFile, backup->database_location, - .path = postgres_auto_path_tmp, + .path = postgres_auto_path, .content = ft_str2bytes(ft_strbuf_ref(&content))); ft_strbuf_free(&content); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "writting recovery options"); - - err = $i(pioRename, 
backup->database_location, - .old_path = postgres_auto_path_tmp, - .new_path = postgres_auto_path); - if ($haserr(err)) - ft_logerr(FT_FATAL, $errmsg(err), "renaming postgres.auto.conf file"); - - /* skip chmod, since pioWriteFile creates with FILE_PERMISSION */ } #endif diff --git a/src/utils/file.c b/src/utils/file.c index dba2826c6..701b2ec34 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -4488,9 +4488,10 @@ pioLocalDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) { FOBJ_FUNC_ARP(); Self(pioLocalDrive); - pioFile_i fl; - size_t amount; err_i err; + ft_str_t temppath = ft_str(NULL, 0); + int fd = -1; + ssize_t r; fobj_reset_err(&err); @@ -4501,40 +4502,96 @@ pioLocalDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) return $iresult(err); } - /* - * rely on "local file is read whole at once always". - * Is it true? - */ - fl = $(pioOpen, self, .path = path, - .flags = O_WRONLY | O_CREAT | O_TRUNC | (binary ? PG_BINARY : 0), - .err = &err); - if ($haserr(err)) - return $iresult(err); + if (content.len == 0) + { + /* just create file */ + fd = creat(path, FILE_PERMISSION); + if (fd < 0) + { + err = $syserr(errno, "Create file for {path} failed", path(path)); + return $iresult(err); + } + if (fsync(fd) < 0) + { + err = $syserr(errno, "Closing file {path} failed", path(path)); + close(fd); + return $iresult(err); + } + if (close(fd) < 0) + { + err = $syserr(errno, "Closing file {path} failed", path(path)); + return $iresult(err); + } + return $noerr(); + } - amount = $i(pioWrite, fl, .buf = content, .err = &err); - if ($haserr(err)) - return $iresult(err); + /* make temporary name */ + temppath = ft_asprintf("%s~tmpXXXXXX", path); + fd = mkstemp(temppath.ptr); + if (fd < 0) + { + err = $syserr(errno, "Create temp file for {path} failed", path(path)); + goto error; + } - if (amount != content.len) +#if PG_BINARY + if (binary && _setmode(fd, PG_BINARY) < 0) { - err = $err(RT, "File {path:q} is truncated while 
reading", - path(path), errNo(EBUSY)); - $iresult(err); - return err; + err = $syserr(errno, "Set file mode for {path} failed", path(temppath.ptr)); + goto error; } +#endif - err = $i(pioWriteFinish, fl); - if ($haserr(err)) - return $iresult(err); + r = durable_write(fd, content.ptr, content.len); + if (r < 0) + { + err = $syserr(errno, "Cannot write to file {path:q}", + path(temppath.ptr)); + goto error; + } - err = $i(pioClose, fl, .sync = true); - if ($haserr(err)) + if (r < content.len) { - $(pioRemove, self, .path = path); - return $iresult(err); + err = $err(SysErr, "Short write on {path:q}: {writtenSz} < {wantedSz}", + path(temppath.ptr), writtenSz(r), wantedSz(content.len), + errNo(EIO)); + goto error; } + if (fsync(fd) < 0) + { + err = $syserr(errno, "Cannot fsync file {path:q}", + path(temppath.ptr)); + goto error; + } + + if (close(fd) < 0) + { + err = $syserr(errno, "Cannot close file {path:q}", + path(temppath.ptr)); + goto error; + } + fd = -1; + + if (rename(temppath.ptr, path) < 0) + { + err = $syserr(errno, "Cannot close file {path:q}", + path(temppath.ptr)); + goto error; + } + + ft_str_free(&temppath); return $noerr(); + +error: + if (fd >= 0) + close(fd); + if (temppath.len > 0) + { + remove(temppath.ptr); + ft_str_free(&temppath); + } + return $iresult(err); } /* LOCAL FILE */ @@ -4616,7 +4673,8 @@ pioLocalFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) if (r < buf.len) { *err = $err(SysErr, "Short write on {path:q}: {writtenSz} < {wantedSz}", - path(self->p.path), writtenSz(r), wantedSz(buf.len)); + path(self->p.path), writtenSz(r), wantedSz(buf.len), + errNo(EIO)); } return r; } From 2d084aaf72499cbe4c97942d1cb98db24252d438 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 2 Dec 2022 12:48:15 +0300 Subject: [PATCH 125/339] ft: dealloc message in fobjErr_fobjDispose --- src/fu_util/impl/fo_impl.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/fu_util/impl/fo_impl.c b/src/fu_util/impl/fo_impl.c index 52884d27e..26f8b7625 100644 
--- a/src/fu_util/impl/fo_impl.c +++ b/src/fu_util/impl/fo_impl.c @@ -1166,6 +1166,7 @@ fobjErr_fobjDispose(VSelf) { break; } } + ft_free((void*)self->message); $del(&self->sibling); } From ae4b3ba8997da022b934e23978960b5ef4e77df5 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 2 Dec 2022 12:54:32 +0300 Subject: [PATCH 126/339] make ft_malloc/free to be functions instead of macros --- src/fu_util/ft_util.h | 9 +++++---- src/fu_util/impl/ft_impl.c | 24 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index 5fe08678c..aa53419df 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -162,10 +162,11 @@ extern void* ft_realloc(void* ptr, size_t new_sz); extern void* ft_calloc(size_t sz); extern void* ft_realloc_arr(void* ptr, size_t elem_sz, size_t old_elems, size_t new_elems); -#define ft_malloc(sz) ft_realloc(NULL, (sz)) -#define ft_malloc_arr(sz, cnt) ft_realloc(NULL, ft_mul_size((sz), (cnt))) -#define ft_free(ptr) ft_realloc((ptr), 0) -#define ft_calloc_arr(sz, cnt) ft_calloc(ft_mul_size((sz), (cnt))) +extern void* ft_malloc(size_t sz); +extern void* ft_malloc_arr(size_t sz, size_t cnt); +extern void ft_free(void* ptr); +extern void* ft_calloc_arr(size_t sz, size_t cnt); + extern void ft_set_allocators(void *(*_realloc)(void *, size_t), void (*_free)(void*)); diff --git a/src/fu_util/impl/ft_impl.c b/src/fu_util/impl/ft_impl.c index ba6bb6664..84b47714e 100644 --- a/src/fu_util/impl/ft_impl.c +++ b/src/fu_util/impl/ft_impl.c @@ -62,6 +62,30 @@ ft_realloc(void *oldptr, size_t size) { return NULL; } +void* +ft_malloc(size_t sz) +{ + return ft_realloc(NULL, sz); +} + +void* +ft_malloc_arr(size_t sz, size_t cnt) +{ + return ft_realloc(NULL, ft_mul_size(sz, cnt)); +} + +void +ft_free(void* ptr) +{ + ft_realloc(ptr, 0); +} + +void* +ft_calloc_arr(size_t sz, size_t cnt) +{ + return ft_calloc(ft_mul_size(sz, cnt)); +} + void* ft_realloc_arr(void* ptr, size_t elem_sz, 
size_t old_elems, size_t new_elems) { ptr = ft_realloc(ptr, ft_mul_size(elem_sz, new_elems)); From 042f172eab37178a377afc23422b6f9bfe029553 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 4 Dec 2022 04:21:38 +0300 Subject: [PATCH 127/339] temporary disable windows build on REL_2_6 (try to) --- .github/workflows/build.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6f99d0f27..7393e2650 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,9 +1,9 @@ name: Build Probackup on: - push: - branches: - - "**" + #push: + # branches: + # - "**" # Runs triggered by pull requests are disabled to prevent executing potentially unsafe code from public pull requests # pull_request: # branches: From ce297c3fa502545495a833a4f21ab44ff10c1bab Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Mon, 5 Dec 2022 13:10:42 +0300 Subject: [PATCH 128/339] PBCKP-351 use new error message format in control test. 
--- tests/restore_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/restore_test.py b/tests/restore_test.py index 4c940ca29..669e15384 100644 --- a/tests/restore_test.py +++ b/tests/restore_test.py @@ -2987,8 +2987,8 @@ def test_empty_and_mangled_database_map(self): self.output, self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: field "dbOid" is not found in the line 42 of ' - 'the file backup_content.control', e.message, + 'ERROR: backup_content.control file has invalid format in line 42', + e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) @@ -3003,8 +3003,8 @@ def test_empty_and_mangled_database_map(self): self.output, self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: field "dbOid" is not found in the line 42 of ' - 'the file backup_content.control', e.message, + 'ERROR: backup_content.control file has invalid format in line 42', + e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) From 6937adf29c4b263028c345e587a317e6b9443129 Mon Sep 17 00:00:00 2001 From: Yuriy Sokolov Date: Mon, 5 Dec 2022 14:12:57 +0300 Subject: [PATCH 129/339] [PBCKP-350] make pio_line_reader and use it in get_backup_filelist --- src/catalog.c | 46 +++++++++-------- src/fu_util/ft_util.h | 1 + src/pg_probackup.h | 1 + src/utils/file.c | 116 ++++++++++++++++++++++-------------------- src/utils/file.h | 15 ++++-- 5 files changed, 101 insertions(+), 78 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index ab1799ea7..63ee5f3ed 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1013,23 +1013,26 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id parray * get_backup_filelist(pgBackup *backup, bool strict) { + FOBJ_FUNC_ARP(); parray *files = NULL; char backup_filelist_path[MAXPGPATH]; - FILE *fp; - char buf[BLCKSZ]; - char stdio_buf[STDIO_BUFSIZE]; pg_crc32 content_crc = 0; - pb_control_line pb_line; + 
pb_control_line ft_cleanup(deinit_pb_control_line) + pb_line = {0}; + pio_line_reader ft_cleanup(deinit_pio_line_reader) + line_reader = {0}; + ft_bytes_t line; + err_i err = $noerr(); + pioFile_i fl; join_path_components(backup_filelist_path, backup->root_dir, DATABASE_FILE_LIST); - fp = fio_open_stream(FIO_BACKUP_HOST, backup_filelist_path); - if (fp == NULL) - elog(ERROR, "cannot open \"%s\": %s", backup_filelist_path, strerror(errno)); + fl = $i(pioOpen, backup->backup_location, .path = backup_filelist_path, + .flags = O_RDONLY, .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Opening backup filelist"); - /* enable stdio buffering for local file */ - if (!fio_is_remote(FIO_BACKUP_HOST)) - setvbuf(fp, stdio_buf, _IOFBF, STDIO_BUFSIZE); + init_pio_line_reader(&line_reader, $reduce(pioRead, fl), IN_BUF_SIZE); files = parray_new(); @@ -1037,7 +1040,7 @@ get_backup_filelist(pgBackup *backup, bool strict) init_pb_control_line(&pb_line); - while (fgets(buf, lengthof(buf), fp)) + for(;;) { ft_str_t path; ft_str_t linked; @@ -1059,9 +1062,15 @@ get_backup_filelist(pgBackup *backup, bool strict) hdr_size; pgFile *file; - COMP_CRC32C(content_crc, buf, strlen(buf)); + line = pio_line_reader_getline(&line_reader, &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Reading backup filelist"); + if (line.len == 0) + break; - parse_pb_control_line(&pb_line, ft_str2bytes(ft_cstr(buf))); + COMP_CRC32C(content_crc, line.ptr, line.len); + + parse_pb_control_line(&pb_line, line); path = pb_control_line_get_str(&pb_line, "path"); write_size = pb_control_line_get_int64(&pb_line, "size"); @@ -1075,8 +1084,8 @@ get_backup_filelist(pgBackup *backup, bool strict) pb_control_line_try_int64(&pb_line, "external_dir_num", &external_dir_num); if (path.len > MAXPGPATH) - elog(ERROR, "File path in "DATABASE_FILE_LIST" is too long: '%s'", - buf); + elog(ERROR, "File path in "DATABASE_FILE_LIST" is too long: '%.*s'", + (int)line.len, line.ptr); file = 
pgFileInit(path.ptr); file->write_size = (int64) write_size; @@ -1148,14 +1157,9 @@ get_backup_filelist(pgBackup *backup, bool strict) parray_append(files, file); } - deinit_pb_control_line(&pb_line); - FIN_CRC32C(content_crc); - if (ferror(fp)) - elog(ERROR, "Failed to read from file: \"%s\"", backup_filelist_path); - - fio_close_stream(fp); + err = $i(pioClose, fl); if (backup->content_crc != 0 && backup->content_crc != content_crc) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index aa53419df..59acc1234 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -38,6 +38,7 @@ typedef SSIZE_T ssize_t; #define ft_likely(x) __builtin_expect(!!(x), 1) #define ft_unlikely(x) __builtin_expect(!!(x), 0) #define ft_always_inline __attribute__((always_inline)) +#define ft_cleanup(func) __attribute__((cleanup(func))) #else #define ft_gcc_const #define ft_gcc_pure diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 5ed0d7b08..20d1ec295 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -106,6 +106,7 @@ extern const char *PROGRAM_EMAIL; #define ERRMSG_MAX_LEN 2048 #define CHUNK_SIZE (128 * 1024) #define LARGE_CHUNK_SIZE (4 * 1024 * 1024) +#define IN_BUF_SIZE (512 * 1024) #define OUT_BUF_SIZE (512 * 1024) /* retry attempts */ diff --git a/src/utils/file.c b/src/utils/file.c index 701b2ec34..062d24c57 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -14,7 +14,6 @@ #define PRINTF_BUF_SIZE 1024 static __thread unsigned long fio_fdset = 0; -static __thread void* fio_stdin_buffer; static __thread int fio_stdout = 0; static __thread int fio_stdin = 0; static __thread int fio_stderr = 0; @@ -381,59 +380,6 @@ pio_limit_mode(mode_t mode) return mode; } -/* Open input stream. 
Remote file is fetched to the in-memory buffer and then accessed through Linux fmemopen */ -FILE* -fio_open_stream(fio_location location, const char* path) -{ - FILE* f; - if (fio_is_remote(location)) - { - fio_header hdr; - hdr.cop = FIO_LOAD; - hdr.size = strlen(path) + 1; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); - - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - Assert(hdr.cop == FIO_SEND); - if (hdr.size > 0) - { - Assert(fio_stdin_buffer == NULL); - fio_stdin_buffer = pgut_malloc(hdr.size); - IO_CHECK(fio_read_all(fio_stdin, fio_stdin_buffer, hdr.size), hdr.size); -#ifdef WIN32 - f = tmpfile(); - IO_CHECK(fwrite(fio_stdin_buffer, 1, hdr.size, f), hdr.size); - SYS_CHECK(fseek(f, 0, SEEK_SET)); -#else - f = fmemopen(fio_stdin_buffer, hdr.size, "r"); -#endif - } - else - { - f = NULL; - } - } - else - { - f = fopen(path, "rt"); - } - return f; -} - -/* Close input stream */ -int -fio_close_stream(FILE* f) -{ - if (fio_stdin_buffer) - { - free(fio_stdin_buffer); - fio_stdin_buffer = NULL; - } - return fclose(f); -} - /* Open directory */ DIR* fio_opendir(fio_location location, const char* path) @@ -6086,6 +6032,68 @@ pioReadFull(pioRead_i src, ft_bytes_t bytes, err_i* err) return bytes.len - b.len; } +void +init_pio_line_reader(pio_line_reader *r, pioRead_i source, size_t max_length) { + r->source = $iref(source); + r->buf = ft_bytes_alloc(max_length); + r->rest = ft_bytes(NULL, 0); +} + +void +deinit_pio_line_reader(pio_line_reader *r) +{ + $idel(&r->source); + ft_bytes_free(&r->buf); + r->rest = ft_bytes(NULL, 0); +} + +ft_bytes_t +pio_line_reader_getline(pio_line_reader *r, err_i *err) +{ + ft_bytes_t res; + ft_bytes_t tmp; + size_t sz; + char last; + + fobj_reset_err(err); + +retry: + res = ft_bytes_shift_line(&r->rest); + /* if we got too long line */ + if (res.len == r->buf.len) + { + *err = $err(RT, "Line doesn't fit buffer of size {size}", 
+ size(r->buf.len)); + /* restore rest to produce error again next time */ + r->rest = r->buf; + return ft_bytes(NULL, 0); + } + + last = res.len != 0 ? res.ptr[res.len-1] : 0; + /* not first time and definitely reached end of line */ + if (res.len != 0 && (last == '\n' || last == '\r')) + return res; + + if (res.ptr != NULL) + memmove(r->buf.ptr, res.ptr, res.len); + + r->rest = ft_bytes(r->buf.ptr, res.len); + tmp = r->buf; + ft_bytes_consume(&tmp, res.len); + sz = $i(pioRead, r->source, tmp, err); + r->rest.len += sz; + if ($haserr(*err)) + return ft_bytes(NULL, 0); + /* reached end of file */ + if (sz == 0) + { + res = r->rest; + r->rest = ft_bytes(NULL, 0); + return res; + } + goto retry; +} + fobj_klass_handle(pioFile); fobj_klass_handle(pioLocalDrive); fobj_klass_handle(pioRemoteDrive); diff --git a/src/utils/file.h b/src/utils/file.h index 5fd4f6b81..bbfedd5b6 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -163,9 +163,6 @@ extern int fio_fseek(FILE* f, off_t offs); extern int fio_ftruncate(FILE* f, off_t size); extern int fio_fclose(FILE* f); -extern FILE* fio_open_stream(fio_location location, const char* name); -extern int fio_close_stream(FILE* f); - /* gzFile-style functions */ #ifdef HAVE_LIBZ extern gzFile fio_gzopen(fio_location location, const char* path, const char* mode, int level); @@ -355,4 +352,16 @@ extern err_i pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, }) extern size_t pioReadFull(pioRead_i src, ft_bytes_t bytes, err_i* err); + +typedef struct pio_line_reader pio_line_reader; +struct pio_line_reader { + pioRead_i source; + ft_bytes_t buf; + ft_bytes_t rest; +}; + +extern void init_pio_line_reader(pio_line_reader *r, pioRead_i source, size_t max_length); +extern void deinit_pio_line_reader(pio_line_reader *r); +extern ft_bytes_t pio_line_reader_getline(pio_line_reader *r, err_i *err); + #endif From aa074cad26feae1e70ca9fa68530eb9940eb0d3c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 5 Dec 2022 20:32:04 +0300 
Subject: [PATCH 130/339] ignore lock_file absense in release_shared_lock_file It were ignored until f6e7be6d . Lets put that nuance back. TODO: should find why we call `release_shared_lock_file` without lock held. --- src/catalog.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/catalog.c b/src/catalog.c index 63ee5f3ed..f18a9b1c1 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -766,7 +766,13 @@ release_shared_lock_file(const char *backup_dir) if (pids.len == 0) { ft_arr_pid_free(&pids); - if (remove(lock_file) != 0) + /* + * TODO: we should not call 'release_shared_lock_file' if we don't hold it. + * Therefore we should not ignore ENOENT. + * We should find why ENOENT happens, but until then lets ignore it as + * it were ignored for a while. + */ + if (remove(lock_file) != 0 && errno != ENOENT) elog(ERROR, "Cannot remove shared lock file \"%s\": %s", lock_file, strerror(errno)); return; } From 1b48745da1acacf24bdafca5ac0d2eabfd0810c1 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 6 Dec 2022 00:14:40 +0300 Subject: [PATCH 131/339] Some pioDrive refactoring [PBCKP-319][PBCKP-384] - started with changing `do_set_config` to use pioWriteFile - to do this I wanted to pass drive to `do_set_config` - this lead me to addition: - `backup_location` to CatalogState - `backup_location` and `database_location` to `InstanceState` - removing `database_location` from `pgBackup` because it could be get from `InstanceState` (perhaps it is wrong) - so I wanted to pass drive to `pgBackupInit`, through `read_backup`->`readBackupControlFile` - since there was `config_read_opt`, I wanted to pass drive into as well - and I had to split `config_read_opt` into `config_parse_opt` to unbind it from "file.h" - after all I found `do_set_config` doesn't need `missing_ok` argument since `SET_CONFIG_CMD` reads config with `config_read_opt(missing_ok=false)` - and btw changed `setMyLocation` to be more readable - and factored out creation of InstanceState --- 
src/catalog.c | 55 +++++++++++++++---------- src/configure.c | 84 ++++++++++++++++++++++----------------- src/init.c | 2 +- src/pg_probackup.c | 39 +++++++++--------- src/pg_probackup.h | 13 ++++-- src/pg_probackup_state.h | 4 +- src/restore.c | 20 +++++----- src/utils/configuration.c | 24 ++--------- src/utils/configuration.h | 4 +- src/utils/file.c | 29 ++++++++++---- src/validate.c | 17 +++----- 11 files changed, 156 insertions(+), 135 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index f18a9b1c1..1cddc5ada 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -21,7 +21,7 @@ static pgBackup* get_closest_backup(timelineInfo *tlinfo); static pgBackup* get_oldest_backup(timelineInfo *tlinfo); static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"}; -static pgBackup *readBackupControlFile(const char *path); +static pgBackup *readBackupControlFile(pioDrive_i drive, const char *path); static err_i create_backup_dir(pgBackup *backup, const char *backup_instance_path); static bool backup_lock_exit_hook_registered = false; @@ -108,13 +108,13 @@ unlink_lock_atexit(bool unused_fatal, void *unused_userdata) * If no backup matches, return NULL. 
*/ pgBackup * -read_backup(const char *root_dir) +read_backup(pioDrive_i drive, const char *root_dir) { char conf_path[MAXPGPATH]; join_path_components(conf_path, root_dir, BACKUP_CONTROL_FILE); - return readBackupControlFile(conf_path); + return readBackupControlFile(drive, conf_path); } /* @@ -129,7 +129,7 @@ write_backup_status(pgBackup *backup, BackupStatus status, { pgBackup *tmp; - tmp = read_backup(backup->root_dir); + tmp = read_backup(backup->backup_location, backup->root_dir); if (!tmp) { /* @@ -864,15 +864,7 @@ catalog_get_instance_list(CatalogState *catalogState) if (!S_ISDIR(st.st_mode)) continue; - instanceState = pgut_new(InstanceState); - - strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH); - join_path_components(instanceState->instance_backup_subdir_path, - catalogState->backup_subdir_path, instanceState->instance_name); - join_path_components(instanceState->instance_wal_subdir_path, - catalogState->wal_subdir_path, instanceState->instance_name); - join_path_components(instanceState->instance_config_path, - instanceState->instance_backup_subdir_path, BACKUP_CATALOG_CONF_FILE); + instanceState = makeInstanceState(catalogState, dent->d_name); instanceState->config = readInstanceConfigFile(instanceState); parray_append(instances, instanceState); @@ -934,12 +926,12 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id /* read backup information from BACKUP_CONTROL_FILE */ join_path_components(backup_conf_path, data_path, BACKUP_CONTROL_FILE); - backup = readBackupControlFile(backup_conf_path); + backup = readBackupControlFile(instanceState->backup_location, backup_conf_path); if (!backup) { backup = pgut_new0(pgBackup); - pgBackupInit(backup); + pgBackupInit(backup, instanceState->backup_location); backup->start_time = base36dec(data_ent->d_name); /* XXX BACKUP_ID change it when backup_id wouldn't match start_time */ Assert(backup->backup_id == 0 || backup->backup_id == backup->start_time); @@ -2621,7 +2613,7 
@@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, * - Do not care section. */ static pgBackup * -readBackupControlFile(const char *path) +readBackupControlFile(pioDrive_i drive, const char *path) { pgBackup *backup = pgut_new0(pgBackup); char *backup_mode = NULL; @@ -2670,9 +2662,9 @@ readBackupControlFile(const char *path) {0} }; - pgBackupInit(backup); + pgBackupInit(backup, drive); - parsed_options = config_read_opt(path, options, WARNING, true, false); + parsed_options = config_read_opt(drive, path, options, WARNING, true, false); if (parsed_options == 0) { @@ -2892,7 +2884,7 @@ pgNodeInit(PGNodeInfo *node) * Fill pgBackup struct with default values. */ void -pgBackupInit(pgBackup *backup) +pgBackupInit(pgBackup *backup, pioDrive_i drive) { backup->backup_id = INVALID_BACKUP_ID; backup->backup_mode = BACKUP_MODE_INVALID; @@ -2934,8 +2926,7 @@ pgBackupInit(pgBackup *backup) backup->note = NULL; backup->content_crc = 0; - backup->backup_location = pioDriveForLocation(FIO_BACKUP_HOST); - backup->database_location = pioDriveForLocation(FIO_DB_HOST); + backup->backup_location = drive; } /* free pgBackup object */ @@ -2946,7 +2937,6 @@ pgBackupFree(void *backup) /* Both point to global static vars */ b->backup_location.self = NULL; - b->database_location.self = NULL; pg_free(b->primary_conninfo); pg_free(b->external_dir_str); @@ -3146,3 +3136,24 @@ append_children(parray *backup_list, pgBackup *target_backup, parray *append_lis } } } + +InstanceState* makeInstanceState(CatalogState* catalogState, const char* name) +{ + InstanceState* instanceState; + + instanceState = pgut_new0(InstanceState); + + instanceState->catalog_state = catalogState; + + strncpy(instanceState->instance_name, name, MAXPGPATH); + join_path_components(instanceState->instance_backup_subdir_path, + catalogState->backup_subdir_path, instanceState->instance_name); + join_path_components(instanceState->instance_wal_subdir_path, + catalogState->wal_subdir_path, 
instanceState->instance_name); + join_path_components(instanceState->instance_config_path, + instanceState->instance_backup_subdir_path, BACKUP_CATALOG_CONF_FILE); + + instanceState->backup_location = catalogState->backup_location; + + return instanceState; +} diff --git a/src/configure.c b/src/configure.c index fd3d87a26..e6af22f6d 100644 --- a/src/configure.c +++ b/src/configure.c @@ -283,32 +283,54 @@ do_show_config(void) show_configure_end(); } +/* + * Get configuration from configuration file. + * Return number of parsed options. + */ +int +config_read_opt(pioDrive_i drive, const char *path, ConfigOption options[], int elevel, + bool strict, bool missing_ok) +{ + int parsed_options = 0; + err_i err = $noerr(); + ft_bytes_t config_file = {0}; + + if (!options) + return parsed_options; + + config_file = $i(pioReadFile, drive, .path = path, .binary = false, + .err = &err); + if ($haserr(err)) + { + if (missing_ok && getErrno(err) == ENOENT) + return 0; + + ft_logerr(FT_FATAL, $errmsg(err), "could not read file"); + return 0; + } + + parsed_options = config_parse_opt(config_file, path, options, elevel, strict); + + ft_bytes_free(&config_file); + + return parsed_options; +} + /* * Save configure options into BACKUP_CATALOG_CONF_FILE. Do not save default * values into the file. 
*/ void -do_set_config(InstanceState *instanceState, bool missing_ok) +do_set_config(InstanceState *instanceState) { - char path_temp[MAXPGPATH]; - FILE *fp; + ft_strbuf_t buf = ft_strbuf_zero(); + err_i err = $noerr(); int i; - snprintf(path_temp, sizeof(path_temp), "%s.tmp", instanceState->instance_config_path); - - if (!missing_ok && !fileExists(instanceState->instance_config_path, FIO_LOCAL_HOST)) - elog(ERROR, "Configuration file \"%s\" doesn't exist", instanceState->instance_config_path); - - fp = fopen(path_temp, "wt"); - if (fp == NULL) - elog(ERROR, "Cannot create configuration file \"%s\": %s", - BACKUP_CATALOG_CONF_FILE, strerror(errno)); - current_group = NULL; for (i = 0; instance_options[i].type; i++) { - int rc = 0; ConfigOption *opt = &instance_options[i]; char *value; @@ -325,37 +347,24 @@ do_set_config(InstanceState *instanceState, bool missing_ok) if (current_group == NULL || strcmp(opt->group, current_group) != 0) { current_group = opt->group; - fprintf(fp, "# %s\n", current_group); + ft_strbuf_catf(&buf, "# %s\n", current_group); } if (strchr(value, ' ')) - rc = fprintf(fp, "%s = '%s'\n", opt->lname, value); + ft_strbuf_catf(&buf, "%s = '%s'\n", opt->lname, value); else - rc = fprintf(fp, "%s = %s\n", opt->lname, value); - - if (rc < 0) - elog(ERROR, "Cannot write to configuration file: \"%s\"", path_temp); + ft_strbuf_catf(&buf, "%s = %s\n", opt->lname, value); pfree(value); } - if (ferror(fp) || fflush(fp)) - elog(ERROR, "Cannot write to configuration file: \"%s\"", path_temp); - - if (fclose(fp)) - elog(ERROR, "Cannot close configuration file: \"%s\"", path_temp); - - if (fio_sync(FIO_LOCAL_HOST, path_temp) != 0) - elog(ERROR, "Failed to sync temp configuration file \"%s\": %s", - path_temp, strerror(errno)); + err = $i(pioWriteFile, instanceState->backup_location, + .path = instanceState->instance_config_path, + .content = ft_bytes(buf.ptr, buf.len), + .binary = false); - if (rename(path_temp, instanceState->instance_config_path) < 0) - { - 
int errno_temp = errno; - unlink(path_temp); - elog(ERROR, "Cannot rename configuration file \"%s\" to \"%s\": %s", - path_temp, instanceState->instance_config_path, strerror(errno_temp)); - } + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Writting configuration file"); } void @@ -604,7 +613,8 @@ readInstanceConfigFile(InstanceState *instanceState) return NULL; } - parsed_options = config_read_opt(instanceState->instance_config_path, + parsed_options = config_read_opt(instanceState->backup_location, + instanceState->instance_config_path, instance_options, WARNING, true, true); if (parsed_options == 0) diff --git a/src/init.c b/src/init.c index b25676a3e..6c7439eef 100644 --- a/src/init.c +++ b/src/init.c @@ -151,7 +151,7 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) SOURCE_DEFAULT); /* pgdata was set through command line */ - do_set_config(instanceState, true); + do_set_config(instanceState); elog(INFO, "Instance '%s' successfully inited", instanceState->instance_name); return 0; diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 9a59ff3ab..4e2c9b6f6 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -304,7 +304,7 @@ main(int argc, char *argv[]) init_console(); /* Initialize current backup */ - pgBackupInit(¤t); + pgBackupInit(¤t, $null(pioDrive)); /* Initialize current instance configuration */ //TODO get git of this global variable craziness @@ -474,12 +474,13 @@ main(int argc, char *argv[]) if (!is_absolute_path(backup_path)) elog(ERROR, "-B, --backup-path must be an absolute path"); - catalogState = pgut_new(CatalogState); + catalogState = pgut_new0(CatalogState); strncpy(catalogState->catalog_path, backup_path, MAXPGPATH); join_path_components(catalogState->backup_subdir_path, catalogState->catalog_path, BACKUPS_DIR); join_path_components(catalogState->wal_subdir_path, catalogState->catalog_path, WAL_SUBDIR); + catalogState->backup_location = pioDriveForLocation(FIO_BACKUP_HOST); } /* backup_path is 
required for all pg_probackup commands except help, version, checkdb and catchup */ @@ -506,17 +507,7 @@ main(int argc, char *argv[]) } else { - instanceState = pgut_new(InstanceState); - instanceState->catalog_state = catalogState; - - strncpy(instanceState->instance_name, instance_name, MAXPGPATH); - join_path_components(instanceState->instance_backup_subdir_path, - catalogState->backup_subdir_path, instanceState->instance_name); - join_path_components(instanceState->instance_wal_subdir_path, - catalogState->wal_subdir_path, instanceState->instance_name); - join_path_components(instanceState->instance_config_path, - instanceState->instance_backup_subdir_path, BACKUP_CATALOG_CONF_FILE); - + instanceState = makeInstanceState(catalogState, instance_name); } /* ===== instanceState (END) ======*/ @@ -537,7 +528,7 @@ main(int argc, char *argv[]) { pio_stat_t st; - st = $i(pioStat, pioDriveForLocation(FIO_BACKUP_HOST), + st = $i(pioStat, catalogState->backup_location, .path = instanceState->instance_backup_subdir_path, .follow_symlink = true, .err = &err); @@ -574,10 +565,8 @@ main(int argc, char *argv[]) if (backup_subcmd != ADD_INSTANCE_CMD && backup_subcmd != ARCHIVE_GET_CMD) { - if (backup_subcmd == CHECKDB_CMD) - config_read_opt(instanceState->instance_config_path, instance_options, ERROR, true, true); - else - config_read_opt(instanceState->instance_config_path, instance_options, ERROR, true, false); + config_read_opt(instanceState->backup_location, instanceState->instance_config_path, + instance_options, ERROR, true, backup_subcmd == CHECKDB_CMD); /* * We can determine our location only after reading the configuration file, @@ -592,6 +581,16 @@ main(int argc, char *argv[]) config_get_opt_env(instance_options); } + /* reset, since it could be changed in setMyLocation above */ + if (catalogState) + catalogState->backup_location = pioDriveForLocation(FIO_BACKUP_HOST); + if (instanceState) + { + instanceState->backup_location = pioDriveForLocation(FIO_BACKUP_HOST); + 
instanceState->database_location = pioDriveForLocation(FIO_DB_HOST); + } + current.backup_location = pioDriveForLocation(FIO_BACKUP_HOST); + /* * Disable logging into file for archive-push and archive-get. * Note, that we should NOT use fio_is_remote() here, @@ -891,7 +890,7 @@ main(int argc, char *argv[]) */ char *stripped_wal_file_path = pgut_str_strip_trailing_filename(wal_file_path, wal_file_name); join_path_components(archive_push_xlog_dir, instance_config.pgdata, XLOGDIR); - if ($i(pioFilesAreSame, pioDriveForLocation(FIO_DB_HOST), + if ($i(pioFilesAreSame, instanceState->database_location, .file1 = stripped_wal_file_path, .file2 = archive_push_xlog_dir)) { /* 2nd case */ @@ -1043,7 +1042,7 @@ main(int argc, char *argv[]) do_show_config(); break; case SET_CONFIG_CMD: - do_set_config(instanceState, false); + do_set_config(instanceState); break; case SET_BACKUP_CMD: if (!backup_id_string) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 20d1ec295..55f3a3c80 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -497,7 +497,6 @@ struct pgBackup /* map used for access to page headers */ HeaderMap hdr_map; - pioDrive_i database_location; /* Where to backup from/restore to */ pioDrive_i backup_location; /* Where to save to/read from */ char backup_id_encoded[base36bufsize]; @@ -771,6 +770,9 @@ typedef struct InstanceState //TODO split into some more meaningdul parts InstanceConfig *config; + + pioDrive_i database_location; + pioDrive_i backup_location; } InstanceState; /* ===== instanceState (END) ===== */ @@ -859,8 +861,10 @@ extern void do_archive_get(InstanceState *instanceState, InstanceConfig *instanc char *wal_file_name, int batch_size, bool validate_wal); /* in configure.c */ +extern int config_read_opt(pioDrive_i drive, const char *path, ConfigOption options[], int elevel, + bool strict, bool missing_ok); extern void do_show_config(void); -extern void do_set_config(InstanceState *instanceState, bool missing_ok); +extern void 
do_set_config(InstanceState *instanceState); extern void init_config(InstanceConfig *config, const char *instance_name); extern InstanceConfig *readInstanceConfigFile(InstanceState *instanceState); @@ -911,7 +915,7 @@ extern parray* get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli #define PAGE_LSN_FROM_FUTURE (-6) /* in catalog.c */ -extern pgBackup *read_backup(const char *root_dir); +extern pgBackup *read_backup(pioDrive_i drive, const char *root_dir); extern void write_backup(pgBackup *backup, bool strict); extern void write_backup_status(pgBackup *backup, BackupStatus status, bool strict); @@ -944,10 +948,11 @@ extern ft_str_t pgBackupWriteControl(pgBackup *backup, bool utc); extern void write_backup_filelist(pgBackup *backup, parray *files, const char *root, parray *external_list, bool sync); +extern InstanceState* makeInstanceState(CatalogState* catalogState, const char* name); extern void pgBackupInitDir(pgBackup *backup, const char *backup_instance_path); extern void pgNodeInit(PGNodeInfo *node); -extern void pgBackupInit(pgBackup *backup); +extern void pgBackupInit(pgBackup *backup, pioDrive_i drive); extern void pgBackupFree(void *backup); extern int pgBackupCompareId(const void *f1, const void *f2); extern int pgBackupCompareIdDesc(const void *f1, const void *f2); diff --git a/src/pg_probackup_state.h b/src/pg_probackup_state.h index 56d852537..51f5dd092 100644 --- a/src/pg_probackup_state.h +++ b/src/pg_probackup_state.h @@ -19,7 +19,9 @@ typedef struct CatalogState char backup_subdir_path[MAXPGPATH]; /* $BACKUP_PATH/wal */ char wal_subdir_path[MAXPGPATH]; // previously global var arclog_path -} CatalogState; + + pioDrive_i backup_location; +} CatalogState; /* ====== CatalogState (END) ======= */ diff --git a/src/restore.c b/src/restore.c index e5330c947..21ea11f64 100644 --- a/src/restore.c +++ b/src/restore.c @@ -62,7 +62,8 @@ static void create_recovery_conf(InstanceState *instanceState, time_t backup_id, static void 
*restore_files(void *arg); static void set_orphan_status(parray *backups, pgBackup *parent_backup); -static void restore_chain(pgBackup *dest_backup, parray *parent_chain, +static void restore_chain(InstanceState *instanceState, + pgBackup *dest_backup, parray *parent_chain, parray *dbOid_exclude_list, pgRestoreParams *params, const char *pgdata_path, bool no_sync, bool cleanup_pgdata, bool backup_has_tblspc); @@ -665,7 +666,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg backup_id_of(dest_backup), dest_backup->server_version); - restore_chain(dest_backup, parent_chain, dbOid_exclude_list, params, + restore_chain(instanceState, dest_backup, parent_chain, dbOid_exclude_list, params, instance_config.pgdata, no_sync, cleanup_pgdata, backup_has_tblspc); //TODO rename and update comment @@ -692,7 +693,8 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg * Flag 'cleanup_pgdata' demands the removing of already existing content in PGDATA. 
*/ void -restore_chain(pgBackup *dest_backup, parray *parent_chain, +restore_chain(InstanceState *instanceState, + pgBackup *dest_backup, parray *parent_chain, parray *dbOid_exclude_list, pgRestoreParams *params, const char *pgdata_path, bool no_sync, bool cleanup_pgdata, bool backup_has_tblspc) @@ -815,7 +817,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, { char *dirpath = parray_get(external_dirs, i); - err = $i(pioMakeDir, dest_backup->database_location, + err = $i(pioMakeDir, instanceState->database_location, .path = dirpath, .mode = DIR_PERMISSION, .strict = false); if ($haserr(err)) { @@ -848,7 +850,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, join_path_components(dirpath, external_path, file->rel_path); elog(LOG, "Create external directory \"%s\"", dirpath); - err = $i(pioMakeDir, dest_backup->database_location, .path = dirpath, + err = $i(pioMakeDir, instanceState->database_location, .path = dirpath, .mode = file->mode, .strict = false); if ($haserr(err)) { @@ -1554,7 +1556,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, join_path_components(postgres_auto_path, instance_config.pgdata, "postgresql.auto.conf"); - old_content = $i(pioReadFile, backup->database_location, + old_content = $i(pioReadFile, instanceState->database_location, .path = postgres_auto_path, .err = &err, .binary = false); /* file not found is not an error case */ @@ -1613,7 +1615,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, elog(LOG, "creating recovery.signal file"); join_path_components(path, instance_config.pgdata, "recovery.signal"); - err = $i(pioWriteFile, backup->database_location, .path = path, + err = $i(pioWriteFile, instanceState->database_location, .path = path, .content = zero); if ($haserr(err)) @@ -1625,7 +1627,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, elog(LOG, "creating standby.signal file"); join_path_components(path, instance_config.pgdata, 
"standby.signal"); - err = $i(pioWriteFile, backup->database_location, .path = path, + err = $i(pioWriteFile, instanceState->database_location, .path = path, .content = zero); if ($haserr(err)) @@ -1634,7 +1636,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, } /* Write data to postgresql.auto.conf.tmp */ - err = $i(pioWriteFile, backup->database_location, + err = $i(pioWriteFile, instanceState->database_location, .path = postgres_auto_path, .content = ft_str2bytes(ft_strbuf_ref(&content))); ft_strbuf_free(&content); diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 4b8f31d49..0d760abac 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -532,36 +532,21 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[], * Return number of parsed options. */ int -config_read_opt(const char *path, ConfigOption options[], int elevel, - bool strict, bool missing_ok) +config_parse_opt(ft_bytes_t content, const char *path, + ConfigOption options[], int elevel, bool strict) { - pioDrive_i local_drive = pioDriveForLocation(FIO_BACKUP_HOST); ft_strbuf_t key = ft_strbuf_zero(); ft_strbuf_t value = ft_strbuf_zero(); int parsed_options = 0; int lno = 0; - err_i err = $noerr(); - ft_bytes_t config_file, to_free; if (!options) return parsed_options; - config_file = $i(pioReadFile, local_drive, .path = path, .binary = false, - .err = &err); - if ($haserr(err)) - { - if (missing_ok && getErrno(err) == ENOENT) - return 0; - - ft_logerr(FT_FATAL, $errmsg(err), "could not read file"); - return 0; - } - to_free = config_file; - - while (config_file.len > 0) + while (content.len > 0) { size_t i; - ft_bytes_t line = ft_bytes_shift_line(&config_file); + ft_bytes_t line = ft_bytes_shift_line(&content); enum pair_result pr; lno++; @@ -599,7 +584,6 @@ config_read_opt(const char *path, ConfigOption options[], int elevel, ft_strbuf_reset_for_reuse(&value); } - ft_bytes_free(&to_free); ft_strbuf_free(&key); 
ft_strbuf_free(&value); diff --git a/src/utils/configuration.h b/src/utils/configuration.h index e42544466..12c70e5be 100644 --- a/src/utils/configuration.h +++ b/src/utils/configuration.h @@ -105,8 +105,8 @@ extern ProbackupSubcmd parse_subcmd(const char * const subcmd_str); extern const char *get_subcmd_name(ProbackupSubcmd const subcmd); extern int config_get_opt(int argc, char **argv, ConfigOption cmd_options[], ConfigOption options[]); -extern int config_read_opt(const char *path, ConfigOption options[], int elevel, - bool strict, bool missing_ok); +extern int config_parse_opt(ft_bytes_t content, const char *path, + ConfigOption options[], int elevel, bool strict); extern void config_get_opt_env(ConfigOption options[]); extern void config_set_opt(ConfigOption options[], void *var, OptionSource source); diff --git a/src/utils/file.c b/src/utils/file.c index 062d24c57..a24657109 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -109,13 +109,28 @@ setMyLocation(ProbackupSubcmd const subcmd) elog(ERROR, "Currently remote operations on Windows are not supported"); #endif - MyLocation = IsSshProtocol() - ? (subcmd == ARCHIVE_PUSH_CMD || subcmd == ARCHIVE_GET_CMD) - ? FIO_DB_HOST - : (subcmd == BACKUP_CMD || subcmd == RESTORE_CMD || subcmd == ADD_INSTANCE_CMD || subcmd == CATCHUP_CMD) - ? 
FIO_BACKUP_HOST - : FIO_LOCAL_HOST - : FIO_LOCAL_HOST; + if (!IsSshProtocol()) + { + MyLocation = FIO_LOCAL_HOST; + return; + } + + switch (subcmd) + { + case ARCHIVE_GET_CMD: + case ARCHIVE_PUSH_CMD: + MyLocation = FIO_DB_HOST; + break; + case BACKUP_CMD: + case RESTORE_CMD: + case ADD_INSTANCE_CMD: + case CATCHUP_CMD: + MyLocation = FIO_BACKUP_HOST; + break; + default: + MyLocation = FIO_LOCAL_HOST; + break; + } } /* Use specified file descriptors as stdin/stdout for FIO functions */ diff --git a/src/validate.c b/src/validate.c index 2dbff543c..680ebd766 100644 --- a/src/validate.c +++ b/src/validate.c @@ -434,18 +434,10 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) /* * Initialize instance configuration. */ - instanceState = pgut_new(InstanceState); - strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH); - - join_path_components(instanceState->instance_backup_subdir_path, - catalogState->backup_subdir_path, instanceState->instance_name); - join_path_components(instanceState->instance_wal_subdir_path, - catalogState->wal_subdir_path, instanceState->instance_name); - join_path_components(instanceState->instance_config_path, - instanceState->instance_backup_subdir_path, BACKUP_CATALOG_CONF_FILE); - - if (config_read_opt(instanceState->instance_config_path, instance_options, ERROR, false, - true) == 0) + instanceState = makeInstanceState(catalogState, dent->d_name); + + if (config_read_opt(catalogState->backup_location, instanceState->instance_config_path, + instance_options, ERROR, false, true) == 0) { elog(WARNING, "Configuration file \"%s\" is empty", instanceState->instance_config_path); corrupted_backup_found = true; @@ -453,6 +445,7 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) } do_validate_instance(instanceState); + pgut_free(instanceState); } } else From 5730021094f767ac19d60e03e90d1ad8219d90dd Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 6 Dec 2022 02:15:29 +0300 Subject: 
[PATCH 132/339] externalize readBackupControlFile --- src/catalog.c | 3 +-- src/pg_probackup.h | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 1cddc5ada..a8f371065 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -21,7 +21,6 @@ static pgBackup* get_closest_backup(timelineInfo *tlinfo); static pgBackup* get_oldest_backup(timelineInfo *tlinfo); static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"}; -static pgBackup *readBackupControlFile(pioDrive_i drive, const char *path); static err_i create_backup_dir(pgBackup *backup, const char *backup_instance_path); static bool backup_lock_exit_hook_registered = false; @@ -2612,7 +2611,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, * - Comment starts with ';'. * - Do not care section. */ -static pgBackup * +pgBackup * readBackupControlFile(pioDrive_i drive, const char *path) { pgBackup *backup = pgut_new0(pgBackup); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 55f3a3c80..12187afea 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -916,6 +916,7 @@ extern parray* get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli /* in catalog.c */ extern pgBackup *read_backup(pioDrive_i drive, const char *root_dir); +extern pgBackup *readBackupControlFile(pioDrive_i drive, const char *path); extern void write_backup(pgBackup *backup, bool strict); extern void write_backup_status(pgBackup *backup, BackupStatus status, bool strict); From a68963ce7528af45e13c3b754d189c8ed4eab09d Mon Sep 17 00:00:00 2001 From: Sofia Kopikova Date: Wed, 7 Dec 2022 11:43:48 +0300 Subject: [PATCH 133/339] [PBCKP-363] CRC32 WIP --- src/utils/file.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++++ src/utils/file.h | 8 +++++ 2 files changed, 99 insertions(+) diff --git a/src/utils/file.c b/src/utils/file.c index a24657109..783b2d9f0 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -4146,6 +4146,19 @@ 
fobj_klass(pioGZCompress); fobj_klass(pioGZDecompress); #endif +/* CRC32 counter */ +typedef struct pioDevNull +{ +} pioDevNull; + +#define kls__pioDevNull iface__pioWriteFlush, iface(pioWriteFlush) +fobj_klass(pioDevNull); + +typedef struct pioCRC32Counter +{ + pg_crc32 crc; +} pioCRC32Counter; + static pioDrive_i localDrive; static pioDrive_i remoteDrive; @@ -5965,6 +5978,82 @@ pioGZDecompress_fobjRepr(VSelf) } #endif +/* Transform filter method */ +/* Must count crc32 of new portion of data. No output needed */ +static pioFltTransformResult +pioCRC32Counter_pioFltTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i *err) +{ + Self(pioCRC32Counter); + pioFltTransformResult tr = {0, 0}; + fobj_reset_err(err); + size_t copied = ft_min(wbuf.len, rbuf.len); + + if (interrupted) + elog(ERROR, "interrupted during CRC calculation"); + + COMP_CRC32C(self->crc, rbuf.ptr, copied); + + memmove(wbuf.ptr, rbuf.ptr, copied); + + tr.produced = copied; + tr.consumed = copied; + + return tr; +} + +static size_t +pioCRC32Counter_pioFltFinish(VSelf, ft_bytes_t wbuf, err_i *err) +{ + Self(pioCRC32Counter); + fobj_reset_err(err); + + FIN_CRC32C(self->crc); + + return 0; +} + +pioWriteFlush_i +pioDevNull_alloc(void) +{ + fobj_t wrap; + + wrap = $alloc(pioDevNull); + return bind_pioWriteFlush(wrap); +} + +static size_t +pioDevNull_pioWrite(VSelf, ft_bytes_t buf, err_i *err) +{ + fobj_reset_err(err); + return buf.len; +} + +static err_i +pioDevNull_pioWriteFinish(VSelf) +{ + return $noerr(); +} + +pg_crc32 +pioCRC32Counter_getCRC32(pioCRC32Counter* flt) +{ + return flt->crc; +} + +pioCRC32Counter* +pioCRC32Counter_alloc(void) +{ + pioCRC32Counter *res; + pg_crc32 init_crc = 0; + + INIT_CRC32C(init_crc); + + res = $alloc(pioCRC32Counter, .crc = init_crc); + + return res; +} + + err_i pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, pioFilter_i *filters, int nfilters, size_t *copied) @@ -6116,6 +6205,8 @@ fobj_klass_handle(pioLocalFile, inherits(pioFile), mth(fobjDispose, 
fobjRepr)); fobj_klass_handle(pioRemoteFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioWriteFilter, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioReadFilter, mth(fobjDispose, fobjRepr)); +fobj_klass_handle(pioDevNull); +fobj_klass_handle(pioCRC32Counter); #ifdef HAVE_LIBZ fobj_klass_handle(pioGZCompress, mth(fobjRepr)); diff --git a/src/utils/file.h b/src/utils/file.h index bbfedd5b6..839a8e544 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -344,6 +344,14 @@ extern pioFilter_i pioGZCompressFilter(int level); extern pioFilter_i pioGZDecompressFilter(bool ignoreTruncate); #endif +typedef struct pioCRC32Counter pioCRC32Counter; +#define kls__pioCRC32Counter iface__pioFilter, iface(pioFilter) +fobj_klass(pioCRC32Counter); +extern pioCRC32Counter* pioCRC32Counter_alloc(void); +extern pg_crc32 pioCRC32Counter_getCRC32(pioCRC32Counter* flt); + +extern pioWriteFlush_i pioDevNull_alloc(void); + extern err_i pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, pioFilter_i *filters, int nfilters, size_t *copied); #define pioCopy(dest, src, ...) 
({ \ From 95e439f3108c9851c75be08e5d13ab991340d918 Mon Sep 17 00:00:00 2001 From: Sofia Kopikova Date: Wed, 7 Dec 2022 11:44:17 +0300 Subject: [PATCH 134/339] change ft_bytes_move prototype to size_t --- src/fu_util/ft_util.h | 2 +- src/fu_util/impl/ft_impl.h | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index 59acc1234..c1a0865d6 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -333,7 +333,7 @@ ft_inline void ft_bytes_free(ft_bytes_t* bytes) { } ft_inline void ft_bytes_consume(ft_bytes_t *bytes, size_t cut); -ft_inline void ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src); +ft_inline size_t ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src); ft_inline ft_bytes_t ft_bytes_split(ft_bytes_t *bytes, size_t n); extern ft_bytes_t ft_bytes_shift_line(ft_bytes_t *bytes); diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index 22a956e28..987dd8251 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -330,12 +330,14 @@ ft_bytes_consume(ft_bytes_t *bytes, size_t cut) { bytes->len -= cut; } -ft_inline void +ft_inline size_t ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src) { size_t len = ft_min(dest->len, src->len); memmove(dest->ptr, src->ptr, len); ft_bytes_consume(dest, len); ft_bytes_consume(src, len); + + return len; } ft_inline bool From 89bf39c5dd8e7c868ff14afc9d523d348ac40f94 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Dec 2022 07:31:37 +0300 Subject: [PATCH 135/339] pioCopy: return to single pioRead in loop and style cleanup --- src/utils/file.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 783b2d9f0..095789770 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -6087,20 +6087,19 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, size_t read_len = 0; size_t write_len = 0; - read_len = pioReadFull(src, ft_bytes(buf, OUT_BUF_SIZE), &rerr); + 
read_len = $i(pioRead, src, ft_bytes(buf, OUT_BUF_SIZE), &rerr); if (read_len == 0) break; write_len = $i(pioWrite, dest, ft_bytes(buf, read_len), &werr); - if (write_len != read_len || $haserr(werr)) + *copied += write_len; + if (write_len != read_len) { - if (!$haserr(werr)) - werr = $err(SysErr, "Short write to destination file {path}: {writtenSz} < {wantedSz}", - path($irepr(dest)), - wantedSz(read_len), writtenSz(write_len)); + werr = $err(SysErr, "Short write to destination file {path}: {writtenSz} < {wantedSz}", + path($irepr(dest)), + wantedSz(read_len), writtenSz(write_len)); } - *copied += write_len; } err = fobj_err_combine(rerr, werr); From 153b0cf04ce96eea331cb2bbaa24f0075b90e804 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Thu, 8 Dec 2022 07:56:08 +0300 Subject: [PATCH 136/339] some valgrind cleanup --- src/utils/file.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 095789770..deff1152c 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -276,9 +276,7 @@ fio_write_all(int fd, void const* buf, size_t size) void fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size) { - fio_header hdr; - hdr.cop = FIO_AGENT_VERSION; - hdr.size = 0; + fio_header hdr = (fio_header){.cop = FIO_AGENT_VERSION}; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); @@ -541,9 +539,8 @@ fio_disconnect(void) { if (fio_stdin) { - fio_header hdr; - hdr.cop = FIO_DISCONNECT; - hdr.size = 0; + fio_header hdr = (fio_header){.cop = FIO_DISCONNECT}; + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); Assert(hdr.cop == FIO_DISCONNECTED); @@ -3512,7 +3509,7 @@ fio_list_dir_impl(int out, char* buf, pioDrive_i drive) } parray_free(file_files); - hdr.cop = FIO_SEND_FILE_EOF; + hdr = (fio_header){.cop = 
FIO_SEND_FILE_EOF}; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } @@ -4895,6 +4892,7 @@ pioRemoteDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_ta char *buf = pgut_malloc(CHUNK_SIZE); /* Send to the agent message with parameters for directory listing */ + memset(&req, 0, sizeof(req)); snprintf(req.path, MAXPGPATH, "%s", root); req.handle_tablespaces = handle_tablespaces; req.follow_symlink = follow_symlink; @@ -4902,8 +4900,7 @@ pioRemoteDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_ta req.skip_hidden = skip_hidden; req.external_dir_num = external_dir_num; - hdr.cop = FIO_LIST_DIR; - hdr.size = sizeof(req); + hdr = (fio_header){.cop = FIO_LIST_DIR, .size=sizeof(req)}; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, &req, hdr.size), hdr.size); From b6e8f1ebf590608c3c04ac68c759037f162eb780 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Dec 2022 10:05:39 +0300 Subject: [PATCH 137/339] fu_util: assert on method missing --- src/fu_util/impl/fo_impl.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/fu_util/impl/fo_impl.c b/src/fu_util/impl/fo_impl.c index 26f8b7625..9b1dc609a 100644 --- a/src/fu_util/impl/fo_impl.c +++ b/src/fu_util/impl/fo_impl.c @@ -179,6 +179,7 @@ fobj__method_callback_t fobj_method_search(const fobj_t self, fobj_method_handle_t meth, fobj_klass_handle_t for_child, bool validate) { fobj_header_t *h; fobj_klass_handle_t klass; + fobj_klass_handle_t for_klass; fobj__method_callback_t cb = {self, NULL}; if (ft_unlikely(ft_dbg_enabled())) { @@ -214,6 +215,8 @@ fobj_method_search(const fobj_t self, fobj_method_handle_t meth, fobj_klass_hand klass = fobj_klasses[klass].parent; } + for_klass = klass; + do { cb.impl = fobj_search_impl(meth, klass); if (cb.impl != NULL) @@ -221,6 +224,10 @@ fobj_method_search(const fobj_t self, fobj_method_handle_t meth, fobj_klass_hand klass = fobj_klasses[klass].parent; } while 
(klass); + if (validate) + ft_assert(cb.impl != NULL, "Klass '%s' has no method '%s'", + fobj_klasses[for_klass].name, + fobj_methods[meth].name); cb.self = NULL; return cb; } From 4fa34c61f74fe9ecceeeb805d08149e4a14f9443 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Dec 2022 21:05:09 +0300 Subject: [PATCH 138/339] [PBCKP-336] calculate CRC32 inplace --- src/utils/file.c | 70 ++++++++++++++++++++++++++++++++++++++++++------ src/utils/file.h | 6 ++++- 2 files changed, 67 insertions(+), 9 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index deff1152c..66faaf70e 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -4104,6 +4104,7 @@ fobj_klass(pioRemoteFile); typedef struct pioReadFilter { pioRead_i wrapped; pioFilter_i filter; + pioFltInPlace_i inplace; char* buffer; size_t len; size_t capa; @@ -4116,6 +4117,7 @@ fobj_klass(pioReadFilter); typedef struct pioWriteFilter { pioWriteFlush_i wrapped; pioFilter_i filter; + pioFltInPlace_i inplace; char* buffer; size_t capa; bool finished; @@ -4154,6 +4156,7 @@ fobj_klass(pioDevNull); typedef struct pioCRC32Counter { pg_crc32 crc; + int64_t size; } pioCRC32Counter; static pioDrive_i localDrive; @@ -5483,7 +5486,7 @@ pioRead_i pioWrapReadFilter(pioRead_i fl, pioFilter_i flt, size_t buf_size) { void *buf; - fobj_t wrap; + pioReadFilter* wrap; buf = ft_malloc(buf_size); wrap = $alloc(pioReadFilter, @@ -5491,6 +5494,7 @@ pioWrapReadFilter(pioRead_i fl, pioFilter_i flt, size_t buf_size) .filter = $iref(flt), .buffer = buf, .capa = buf_size); + $implements(pioFltInPlace, flt.self, &wrap->inplace); return bind_pioRead(wrap); } @@ -5507,6 +5511,26 @@ pioReadFilter_pioRead(VSelf, ft_bytes_t wbuf, err_i *err) if (self->eof && self->finished) return 0; + if ($notNULL(self->inplace) && !self->eof) + { + r = pioReadFull(self->wrapped, wbuf, err); + if (r > 0) + { + err_i flterr; + flterr = $i(pioFltInPlace, self->inplace, ft_bytes(wbuf.ptr, r)); + *err = fobj_err_combine(*err, flterr); + 
ft_bytes_consume(&wbuf, r); + } + + if ($haserr(*err)) + return wlen - wbuf.len; + if (r == 0) + { + self->eof = true; + goto eof; + } + } + while (wbuf.len > 0) { /* feed filter */ @@ -5543,6 +5567,7 @@ pioReadFilter_pioRead(VSelf, ft_bytes_t wbuf, err_i *err) self->len += r; } +eof: while (wbuf.len > 0 && self->eof) { r = $i(pioFltFinish, self->filter, wbuf, err); @@ -5599,7 +5624,7 @@ pioWriteFlush_i pioWrapWriteFilter(pioWriteFlush_i fl, pioFilter_i flt, size_t buf_size) { void *buf; - fobj_t wrap; + pioWriteFilter* wrap; buf = ft_malloc(buf_size); wrap = $alloc(pioWriteFilter, @@ -5607,6 +5632,7 @@ pioWrapWriteFilter(pioWriteFlush_i fl, pioFilter_i flt, size_t buf_size) .filter = $iref(flt), .buffer = buf, .capa = buf_size); + $implements(pioFltInPlace, flt.self, &wrap->inplace); return bind_pioWriteFlush(wrap); } @@ -5620,6 +5646,16 @@ pioWriteFilter_pioWrite(VSelf, ft_bytes_t rbuf, err_i *err) ft_bytes_t wbuf; size_t r; + if ($notNULL(self->inplace)) + { + *err = $i(pioFltInPlace, self->inplace, rbuf); + if ($haserr(*err)) + return 0; + r = $i(pioWrite, self->wrapped, rbuf, err); + ft_bytes_consume(&rbuf, r); + return rlen - rbuf.len; + } + while (rbuf.len > 0) { wbuf = ft_bytes(self->buffer, self->capa); @@ -5994,10 +6030,22 @@ pioCRC32Counter_pioFltTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i * tr.produced = copied; tr.consumed = copied; + self->size += copied; return tr; } +static err_i +pioCRC32Counter_pioFltInPlace(VSelf, ft_bytes_t rbuf) +{ + Self(pioCRC32Counter); + + COMP_CRC32C(self->crc, rbuf.ptr, rbuf.len); + self->size += rbuf.len; + + return $noerr(); +} + static size_t pioCRC32Counter_pioFltFinish(VSelf, ft_bytes_t wbuf, err_i *err) { @@ -6009,6 +6057,18 @@ pioCRC32Counter_pioFltFinish(VSelf, ft_bytes_t wbuf, err_i *err) return 0; } +pg_crc32 +pioCRC32Counter_getCRC32(pioCRC32Counter* flt) +{ + return flt->crc; +} + +int64_t +pioCRC32Counter_getSize(pioCRC32Counter* flt) +{ + return flt->size; +} + pioWriteFlush_i 
pioDevNull_alloc(void) { @@ -6031,12 +6091,6 @@ pioDevNull_pioWriteFinish(VSelf) return $noerr(); } -pg_crc32 -pioCRC32Counter_getCRC32(pioCRC32Counter* flt) -{ - return flt->crc; -} - pioCRC32Counter* pioCRC32Counter_alloc(void) { diff --git a/src/utils/file.h b/src/utils/file.h index 839a8e544..8a949fd38 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -329,6 +329,9 @@ fobj_method(pioFltTransform); #define mth__pioFltFinish size_t, (ft_bytes_t, out), (err_i*, err) fobj_method(pioFltFinish); +#define mth__pioFltInPlace err_i, (ft_bytes_t, buf) +fobj_method(pioFltInPlace); + #define iface__pioFilter mth(pioFltTransform, pioFltFinish) fobj_iface(pioFilter); @@ -345,10 +348,11 @@ extern pioFilter_i pioGZDecompressFilter(bool ignoreTruncate); #endif typedef struct pioCRC32Counter pioCRC32Counter; -#define kls__pioCRC32Counter iface__pioFilter, iface(pioFilter) +#define kls__pioCRC32Counter iface__pioFilter, mth(pioFltInPlace), iface(pioFilter) fobj_klass(pioCRC32Counter); extern pioCRC32Counter* pioCRC32Counter_alloc(void); extern pg_crc32 pioCRC32Counter_getCRC32(pioCRC32Counter* flt); +extern int64_t pioCRC32Counter_getSize(pioCRC32Counter* flt); extern pioWriteFlush_i pioDevNull_alloc(void); From f1e89a26d774967b973aaf696dddd4d7b071e2dd Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Wed, 30 Nov 2022 15:33:58 +0300 Subject: [PATCH 139/339] [PBCKP-338] page iterator --- src/backup.c | 1 + src/checkdb.c | 3 +- src/data.c | 526 ++++++++++++++++--------------------------- src/extra.h | 32 +++ src/fu_util/fo_obj.h | 1 + src/pg_probackup.h | 44 +--- src/utils/file.c | 438 ++++++++++++++++++++++++++++++++++- src/utils/file.h | 41 +++- tests/backup_test.py | 2 +- 9 files changed, 711 insertions(+), 377 deletions(-) create mode 100644 src/extra.h diff --git a/src/backup.c b/src/backup.c index 9b778285f..90bfb439f 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1950,6 +1950,7 @@ backup_cleanup(bool fatal, void *userdata) static void * backup_files(void *arg) { 
+ FOBJ_FUNC_ARP(); int i; char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; diff --git a/src/checkdb.c b/src/checkdb.c index bc3c60fc5..055e5beaf 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -171,8 +171,7 @@ check_files(void *arg) * uses global variables to set connections. * Need refactoring. */ - if (!check_data_file(&(arguments->conn_arg), - file, from_fullpath, + if (!check_data_file(file, from_fullpath, arguments->checksum_version)) arguments->ret = 2; /* corruption found */ } diff --git a/src/data.c b/src/data.c index d4bdd995f..0039a6df8 100644 --- a/src/data.c +++ b/src/data.c @@ -36,6 +36,15 @@ typedef struct DataPage static bool get_page_header(FILE *in, const char *fullpath, BackupPageHeader *bph, pg_crc32 *crc, uint32 backup_version); +static int send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, + XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, + uint32 checksum_version, + BackupPageHeader2 **headers, BackupMode backup_mode); + +static int copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, + XLogRecPtr sync_lsn, uint32 checksum_version, + BackupMode backup_mode); + #ifdef HAVE_LIBZ /* Implementation of zlib compression method */ static int32 @@ -276,7 +285,7 @@ get_checksum_errormsg(Page page, char **errormsg, BlockNumber absolute_blkno) * TODO: probably we should always * return it to the caller */ -static int32 +int32 prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, BlockNumber blknum, FILE *in, BackupMode backup_mode, @@ -412,39 +421,49 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, return PageIsOk; } -/* split this function in two: compress() and backup() */ -static int -compress_and_backup_page(pgFile *file, BlockNumber blknum, - FILE *in, FILE *out, pg_crc32 *crc, - int page_state, Page page, - CompressAlg calg, int clevel, - const char *from_fullpath, const char *to_fullpath) +int +compress_page(char *write_buffer, size_t buffer_size, 
BlockNumber blknum, void *page, + CompressAlg calg, int clevel, const char *from_fullpath) { - int compressed_size = 0; - size_t write_buffer_size = 0; - char write_buffer[BLCKSZ*2]; /* compressed page may require more space than uncompressed */ - BackupPageHeader* bph = (BackupPageHeader*)write_buffer; const char *errormsg = NULL; + int compressed_size; /* Compress the page */ - compressed_size = do_compress(write_buffer + sizeof(BackupPageHeader), - sizeof(write_buffer) - sizeof(BackupPageHeader), - page, BLCKSZ, calg, clevel, + compressed_size = do_compress(write_buffer, buffer_size, page, BLCKSZ, calg, clevel, &errormsg); /* Something went wrong and errormsg was assigned, throw a warning */ if (compressed_size < 0 && errormsg != NULL) elog(WARNING, "An error occured during compressing block %u of file \"%s\": %s", blknum, from_fullpath, errormsg); - file->compress_alg = calg; /* TODO: wtf? why here? */ - - /* compression didn`t worked */ + /* Compression skip magic part 1: compression didn`t work + * compresssed_size == BLCKSZ is a flag which shows non-compressed state + */ if (compressed_size <= 0 || compressed_size >= BLCKSZ) { /* Do not compress page */ - memcpy(write_buffer + sizeof(BackupPageHeader), page, BLCKSZ); + memcpy(write_buffer, page, BLCKSZ); compressed_size = BLCKSZ; } + + return compressed_size; +} + +static int +backup_page(pgFile *file, BlockNumber blknum, pioFile_i out, + pg_crc32 *crc, void *compressed_page, size_t compressed_size, CompressAlg calg, int clevel, + const char *from_fullpath, const char *to_fullpath) +{ + size_t write_buffer_size = 0; + char write_buffer[BLCKSZ*2]; /* compressed page may require more space than uncompressed */ + BackupPageHeader* bph = (BackupPageHeader*)write_buffer; + err_i err = $noerr(); + size_t rc; + + memcpy(write_buffer + sizeof(BackupPageHeader), compressed_page, compressed_size); + + file->compress_alg = calg; /* TODO: wtf? why here? 
*/ + bph->block = blknum; bph->compressed_size = compressed_size; write_buffer_size = compressed_size + sizeof(BackupPageHeader); @@ -453,7 +472,12 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum, COMP_CRC32C(*crc, write_buffer, write_buffer_size); /* write data page */ - if (fio_fwrite(out, write_buffer, write_buffer_size) != write_buffer_size) + rc = $i(pioWrite, out, .buf = ft_bytes(write_buffer, write_buffer_size), .err = &err); + if ($haserr(err)) + { + ft_logerr(ERROR, $errmsg(err), "Write error in compress and backup"); + } + if (rc != write_buffer_size) elog(ERROR, "File: \"%s\", cannot write at block %u: %s", to_fullpath, blknum, strerror(errno)); @@ -465,10 +489,32 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum, /* Write page as-is. TODO: make it fastpath option in compress_and_backup_page() */ static int -write_page(pgFile *file, FILE *out, Page page) +write_page(pgFile *file, pioFile_i out, int blknum, Page page) { + err_i err = $noerr(); + off_t target = blknum * BLCKSZ; + off_t position; + size_t rc; + + position = $i(pioSeek, out, target, &err); + if ($haserr(err)) + { + ft_logerr(FT_ERROR, $errmsg(err), "write_page"); + } + if (position != target) + { + elog(ERROR, "Can't seek to position %ld", target); + } /* write data page */ - if (fio_fwrite(out, page, BLCKSZ) != BLCKSZ) + err = $noerr(); + + rc = $i(pioWrite, out, .buf = ft_bytes(page, BLCKSZ), .err = &err); + + if ($haserr(err)) + { + ft_log(FT_INFO, $errmsg(err), "write_page"); + } + if (rc != BLCKSZ) return -1; file->write_size += BLCKSZ; @@ -492,7 +538,6 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat HeaderMap *hdr_map, bool is_merge) { int rc; - bool use_pagemap; char *errmsg = NULL; BlockNumber err_blknum = 0; /* page headers */ @@ -543,36 +588,13 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat * Such files should be fully copied. 
*/ - if (file->pagemap.bitmapsize == PageBitmapIsEmpty || - file->pagemap_isabsent || !file->exists_in_prev || - !file->pagemap.bitmap) - use_pagemap = false; - else - use_pagemap = true; - - /* Remote mode */ - if (fio_is_remote(FIO_DB_HOST)) - { - rc = fio_send_pages(to_fullpath, from_fullpath, file, - /* send prev backup START_LSN */ - (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && - file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr, - calg, clevel, checksum_version, - /* send pagemap if any */ - use_pagemap, - /* variables for error reporting */ - &err_blknum, &errmsg, &headers); - } - else - { - /* TODO: stop handling errors internally */ - rc = send_pages(to_fullpath, from_fullpath, file, - /* send prev backup START_LSN */ - (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && - file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr, - calg, clevel, checksum_version, use_pagemap, - &headers, backup_mode); - } + /* send prev backup START_LSN */ + XLogRecPtr start_lsn = (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && + file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr; + /* TODO: stop handling errors internally */ + rc = send_pages(to_fullpath, from_fullpath, file, start_lsn, + calg, clevel, checksum_version, + &headers, backup_mode); /* check for errors */ if (rc == FILE_MISSING) @@ -655,7 +677,6 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa uint32 checksum_version, int64_t prev_size) { int rc; - bool use_pagemap; char *errmsg = NULL; BlockNumber err_blknum = 0; @@ -687,43 +708,12 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa file->write_size = 0; file->uncompressed_size = 0; - /* - * If page map is empty or file is not present in destination directory, - * then copy backup all pages of the relation. 
- */ - - if (file->pagemap.bitmapsize == PageBitmapIsEmpty || - file->pagemap_isabsent || !file->exists_in_prev || - !file->pagemap.bitmap) - use_pagemap = false; - else - use_pagemap = true; - - if (use_pagemap) - elog(LOG, "Using pagemap for file \"%s\"", file->rel_path); - - /* Remote mode */ - if (fio_is_remote(FIO_DB_HOST)) - { - rc = fio_copy_pages(to_fullpath, from_fullpath, file, - /* send prev backup START_LSN */ - ((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && - file->exists_in_prev) ? sync_lsn : InvalidXLogRecPtr, - NONE_COMPRESS, 1, checksum_version, - /* send pagemap if any */ - use_pagemap, - /* variables for error reporting */ - &err_blknum, &errmsg); - } - else - { - /* TODO: stop handling errors internally */ - rc = copy_pages(to_fullpath, from_fullpath, file, - /* send prev backup START_LSN */ - ((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && - file->exists_in_prev) ? sync_lsn : InvalidXLogRecPtr, - checksum_version, use_pagemap, backup_mode); - } + /* send prev backup START_LSN */ + XLogRecPtr start_lsn = ((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && + file->exists_in_prev) ? sync_lsn : InvalidXLogRecPtr; + /* TODO: stop handling errors internally */ + rc = copy_pages(to_fullpath, from_fullpath, file, start_lsn, + checksum_version, backup_mode); /* check for errors */ if (rc == FILE_MISSING) @@ -1153,6 +1143,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers cur_pos_in += read_len; /* + * Compression skip magic part 2: * if page size is smaller than BLCKSZ, decompress the page. * BUGFIX for versions < 2.0.23: if page size is equal to BLCKSZ. 
* we have to check, whether it is compressed or not using @@ -1561,66 +1552,51 @@ validate_one_page(Page page, BlockNumber absolute_blkno, * also returns true if the file was not found */ bool -check_data_file(ConnectionArgs *arguments, pgFile *file, - const char *from_fullpath, uint32 checksum_version) +check_data_file(pgFile *file, const char *from_fullpath, uint32 checksum_version) { - FILE *in; - BlockNumber blknum = 0; - BlockNumber nblocks = 0; - int page_state; - char curr_page[BLCKSZ]; - bool is_valid = true; - - in = fopen(from_fullpath, PG_BINARY_R); - if (in == NULL) - { - /* - * If file is not found, this is not en error. - * It could have been deleted by concurrent postgres transaction. - */ - if (errno == ENOENT) - { + FOBJ_FUNC_ARP(); + pioDrive_i local_location = pioDriveForLocation(FIO_LOCAL_HOST); + pioPagesIterator_i pages; + bool is_valid = true; + err_i err; + + pages = $i(pioIteratePages, local_location, .from_fullpath = from_fullpath, + .file = file, .start_lsn = InvalidXLogRecPtr, .calg = /* No data needed */ NONE_COMPRESS, .clevel = 0, + .checksum_version = checksum_version, .backup_mode = BACKUP_MODE_FULL, + .strict = false, .err = &err); + if ($haserr(err)) + { + if (getErrno(err) == ENOENT) { elog(LOG, "File \"%s\" is not found", from_fullpath); return true; } - - elog(WARNING, "Cannot open file \"%s\": %s", - from_fullpath, strerror(errno)); + ft_logerr(FT_WARNING, $errmsg(err), "Cannot open file \"%s\"", from_fullpath); return false; } - if (file->size % BLCKSZ != 0) - elog(WARNING, "File: \"%s\", invalid file size %zu", from_fullpath, file->size); - - /* - * Compute expected number of blocks in the file. - * NOTE This is a normal situation, if the file size has changed - * since the moment we computed it. 
- */ - nblocks = ft_div_i64u32_to_i32(file->size, BLCKSZ); - - for (blknum = 0; blknum < nblocks; blknum++) + while(true) { - PageState page_st; - page_state = prepare_page(file, InvalidXLogRecPtr, - blknum, in, BACKUP_MODE_FULL, - curr_page, false, checksum_version, - from_fullpath, &page_st); - - if (page_state == PageIsTruncated) + PageIteratorValue value; + err_i err = $i(pioNextPage, pages, &value); + if ($haserr(err)) { + ft_logerr(FT_FATAL, $errmsg(err), "Checking data file"); + return false; + } + if (value.page_result == PageIsTruncated) break; - if (page_state == PageIsCorrupted) + if (value.page_result == PageIsCorrupted) { /* Page is corrupted, no need to elog about it, * prepare_page() already done that + * + * Still check the rest of the pages too */ is_valid = false; continue; } } - fclose(in); return is_valid; } @@ -2031,106 +2007,73 @@ open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 buf_size) } /* backup local file */ -int -send_pages(const char *to_fullpath, const char *from_fullpath, - pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, - uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers, +static int +send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, + XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, + uint32 checksum_version, BackupPageHeader2 **headers, BackupMode backup_mode) { - FILE *in = NULL; - FILE *out = NULL; - off_t cur_pos_out = 0; - char curr_page[BLCKSZ]; + FOBJ_FUNC_ARP(); + pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); + pioDrive_i db_location = pioDriveForLocation(FIO_DB_HOST); + pioPagesIterator_i pages; int n_blocks_read = 0; - BlockNumber blknum = 0; - datapagemap_iterator_t *iter = NULL; + pioFile_i out = $null(pioFile); + off_t cur_pos_out = 0; int compressed_size = 0; BackupPageHeader2 *header = NULL; parray *harray = NULL; + err_i err = $noerr(); - /* stdio buffers */ - char *in_buf = NULL; - 
char *out_buf = NULL; - - /* open source file for read */ - in = fopen(from_fullpath, PG_BINARY_R); - if (in == NULL) + pages = $i(pioIteratePages, db_location, .from_fullpath = from_fullpath, .file = file, + .start_lsn = prev_backup_start_lsn, .calg = calg, .clevel = clevel, + .checksum_version = checksum_version, .backup_mode = backup_mode, + .strict = true, .err = &err); + if ($haserr(err)) { - /* - * If file is not found, this is not en error. - * It could have been deleted by concurrent postgres transaction. - */ - if (errno == ENOENT) + if (getErrno(err) == ENOENT) return FILE_MISSING; - - elog(ERROR, "Cannot open file \"%s\": %s", from_fullpath, strerror(errno)); - } - - /* - * Enable stdio buffering for local input file, - * unless the pagemap is involved, which - * imply a lot of random access. - */ - - if (use_pagemap) - { - iter = datapagemap_iterate(&file->pagemap); - datapagemap_next(iter, &blknum); /* set first block */ - - setvbuf(in, NULL, _IONBF, BUFSIZ); - } - else - { - in_buf = pgut_malloc(STDIO_BUFSIZE); - setvbuf(in, in_buf, _IOFBF, STDIO_BUFSIZE); + ft_logerr(FT_FATAL, $errmsg(err), "send_pages"); + return OPEN_FAILED; } harray = parray_new(); - - while (blknum < file->n_blocks) + while (true) { - PageState page_st; - int rc = prepare_page(file, prev_backup_start_lsn, - blknum, in, backup_mode, curr_page, - true, checksum_version, - from_fullpath, &page_st); - - if (rc == PageIsTruncated) + PageIteratorValue value; + err_i err = $i(pioNextPage, pages, &value); + if ($haserr(err)) { + ft_logerr(FT_FATAL, $errmsg(err), "sending data file pages"); + return READ_FAILED; + } + if (value.page_result == PageIsTruncated) break; - else if (rc == PageIsOk) - { - /* lazily open backup file (useful for s3) */ - if (!out) - out = open_local_file_rw(to_fullpath, &out_buf, STDIO_BUFSIZE); + if (value.page_result == PageIsOk) { + if($isNULL(out)) + { + out = $i(pioOpen, backup_location, to_fullpath, PG_BINARY|O_CREAT|O_RDWR, 0, &err); + if ($haserr(err)) + { 
+ ft_logerr(FT_FATAL, $errmsg(err), "Cannot write file"); + } + } header = pgut_new0(BackupPageHeader2); *header = (BackupPageHeader2){ - .block = blknum, + .block = value.blknum, .pos = cur_pos_out, - .lsn = page_st.lsn, - .checksum = page_st.checksum, + .lsn = value.state.lsn, + .checksum = value.state.checksum, }; - parray_append(harray, header); - compressed_size = compress_and_backup_page(file, blknum, in, out, &(file->crc), - rc, curr_page, calg, clevel, + compressed_size = backup_page(file, value.blknum, out, &(file->crc), + value.compressed_page, value.compressed_size, calg, clevel, from_fullpath, to_fullpath); cur_pos_out += compressed_size + sizeof(BackupPageHeader); } - n_blocks_read++; - - /* next block */ - if (use_pagemap) - { - /* exit if pagemap is exhausted */ - if (!datapagemap_next(iter, &blknum)) - break; - } - else - blknum++; } /* @@ -2154,160 +2097,73 @@ send_pages(const char *to_fullpath, const char *from_fullpath, } parray_free(harray); - /* cleanup */ - if (in && fclose(in)) - elog(ERROR, "Cannot close the source file \"%s\": %s", - to_fullpath, strerror(errno)); - /* close local output file */ - if (out && fclose(out)) - elog(ERROR, "Cannot close the backup file \"%s\": %s", - to_fullpath, strerror(errno)); - - pg_free(iter); - pg_free(in_buf); - pg_free(out_buf); + if ($notNULL(out)) + $i(pioClose, out, true); return n_blocks_read; } /* - * Copy local data file just as send_pages but without attaching additional header and compression + * Copy data file just as send_pages but without attaching additional header and compression */ -int -copy_pages(const char *to_fullpath, const char *from_fullpath, - pgFile *file, XLogRecPtr sync_lsn, - uint32 checksum_version, bool use_pagemap, - BackupMode backup_mode) +static int +copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, + XLogRecPtr sync_lsn, uint32 checksum_version, + BackupMode backup_mode) { - FILE *in = NULL; - FILE *out = NULL; - char curr_page[BLCKSZ]; - int 
n_blocks_read = 0; - BlockNumber blknum = 0; - datapagemap_iterator_t *iter = NULL; - - /* stdio buffers */ - char *in_buf = NULL; - char *out_buf = NULL; - - /* open source file for read */ - in = fopen(from_fullpath, PG_BINARY_R); - if (in == NULL) - { - /* - * If file is not found, this is not en error. - * It could have been deleted by concurrent postgres transaction. - */ - if (errno == ENOENT) + FOBJ_FUNC_ARP(); + pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); + int n_blocks_read = 0; + err_i err = $noerr(); + pioPagesIterator_i pages; + pioFile_i out; + + pages = $i(pioIteratePages, backup_location, .from_fullpath = from_fullpath, + .file = file, .start_lsn = sync_lsn, .calg = NONE_COMPRESS, .clevel = 0, + .checksum_version = checksum_version, + .backup_mode = backup_mode, .strict = true, .err = &err); + if ($haserr(err)) + { + if (getErrno(err) == ENOENT) return FILE_MISSING; - - elog(ERROR, "Cannot open file \"%s\": %s", from_fullpath, strerror(errno)); + ft_logerr(FT_FATAL, $errmsg(err), "Cannot iterate pages"); + return OPEN_FAILED; } - /* - * Enable stdio buffering for local input file, - * unless the pagemap is involved, which - * imply a lot of random access. 
- */ - - if (use_pagemap) + out = $i(pioOpen, backup_location, to_fullpath, PG_BINARY|O_RDWR|O_CREAT, file->mode, &err); + if ($haserr(err)) { - iter = datapagemap_iterate(&file->pagemap); - datapagemap_next(iter, &blknum); /* set first block */ - - setvbuf(in, NULL, _IONBF, BUFSIZ); + ft_logerr(FT_FATAL, $errmsg(err), "Cannot write output file"); } - else - { - in_buf = pgut_malloc(STDIO_BUFSIZE); - setvbuf(in, in_buf, _IOFBF, STDIO_BUFSIZE); - } - - out = fio_fopen(FIO_BACKUP_HOST, to_fullpath, PG_BINARY_R "+"); - if (out == NULL) - elog(ERROR, "Cannot open destination file \"%s\": %s", - to_fullpath, strerror(errno)); - - /* update file permission */ - if (chmod(to_fullpath, file->mode) == -1) - elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath, - strerror(errno)); - - /* Enable buffering for output file */ - out_buf = pgut_malloc(STDIO_BUFSIZE); - setvbuf(out, out_buf, _IOFBF, STDIO_BUFSIZE); - while (blknum < file->n_blocks) + while (true) { - PageState page_st; - int rc = prepare_page(file, sync_lsn, - blknum, in, backup_mode, curr_page, - true, checksum_version, - from_fullpath, &page_st); - if (rc == PageIsTruncated) + PageIteratorValue value; + err_i err = $i(pioNextPage, pages, &value); + if ($haserr(err)) { + ft_logerr(FT_FATAL, $errmsg(err), "copying data file pages"); + return READ_FAILED; + } + if (value.page_result == PageIsTruncated) break; - else if (rc == PageIsOk) - { - if (fseek(out, blknum * BLCKSZ, SEEK_SET) != 0) - elog(ERROR, "Cannot seek to position %u in destination file \"%s\": %s", - blknum * BLCKSZ, to_fullpath, strerror(errno)); - - if (write_page(file, out, curr_page) != BLCKSZ) - elog(ERROR, "File: \"%s\", cannot write at block %u: %s", - to_fullpath, blknum, strerror(errno)); + if(value.page_result == PageIsOk) { + Assert(value.compressed_size == BLCKSZ); /* Assuming NONE_COMPRESS above */ + write_page(file, out, value.blknum, value.compressed_page); } n_blocks_read++; - - /* next block */ - if (use_pagemap) - { - /* exit 
if pagemap is exhausted */ - if (!datapagemap_next(iter, &blknum)) - break; - } - else - blknum++; } - /* truncate output file if required */ - if (fseek(out, 0, SEEK_END) != 0) - elog(ERROR, "Cannot seek to end of file position in destination file \"%s\": %s", - to_fullpath, strerror(errno)); + err = $i(pioTruncate, out, file->size); + if ($haserr(err)) { - int64_t pos = ftell(out); - - if (pos < 0) - elog(ERROR, "Cannot get position in destination file \"%s\": %s", - to_fullpath, strerror(errno)); - - if (pos != file->size) - { - if (fflush(out) != 0) - elog(ERROR, "Cannot flush destination file \"%s\": %s", - to_fullpath, strerror(errno)); - - if (ftruncate(fileno(out), file->size) == -1) - elog(ERROR, "Cannot ftruncate file \"%s\" to size %llu: %s", - to_fullpath, (long long)file->size, strerror(errno)); - } + ft_logerr(FT_ERROR, $errmsg(err), "truncate in copy_pages"); } - /* cleanup */ - if (fclose(in)) - elog(ERROR, "Cannot close the source file \"%s\": %s", - to_fullpath, strerror(errno)); - - /* close output file */ - if (fclose(out)) - elog(ERROR, "Cannot close the destination file \"%s\": %s", - to_fullpath, strerror(errno)); - - pg_free(iter); - pg_free(in_buf); - pg_free(out_buf); + /* close local output file */ + $i(pioClose, out, true); return n_blocks_read; } diff --git a/src/extra.h b/src/extra.h new file mode 100644 index 000000000..89b8474e7 --- /dev/null +++ b/src/extra.h @@ -0,0 +1,32 @@ +#ifndef __EXTRA_H__ +#define __EXTRA_H__ + +typedef enum CompressAlg +{ + NOT_DEFINED_COMPRESS = 0, + NONE_COMPRESS, + PGLZ_COMPRESS, + ZLIB_COMPRESS, +} CompressAlg; + +typedef struct PageState +{ + uint16 checksum; + XLogRecPtr lsn; +} PageState; + +typedef enum BackupMode +{ + BACKUP_MODE_INVALID = 0, + BACKUP_MODE_DIFF_PAGE, /* incremental page backup */ + BACKUP_MODE_DIFF_PTRACK, /* incremental page backup with ptrack system */ + BACKUP_MODE_DIFF_DELTA, /* incremental page backup with lsn comparison */ + BACKUP_MODE_FULL /* full backup */ +} BackupMode; + 
+typedef struct pgFile pgFile; + +int compress_page(char *write_buffer, size_t buffer_size, BlockNumber blknum, void *page, + CompressAlg calg, int clevel, const char *from_fullpath); + +#endif /* __EXTRA_H__ */ diff --git a/src/fu_util/fo_obj.h b/src/fu_util/fo_obj.h index bef1e6fcf..2144b8a38 100644 --- a/src/fu_util/fo_obj.h +++ b/src/fu_util/fo_obj.h @@ -540,6 +540,7 @@ fobj_error_kind(SysErr); fobj_error_object_key(cause); fobj_error_cstr_key(causeStr); fobj_error_int_key(errNo); +fobj_error_int_key(intCode); fobj_error_cstr_key(errNoStr); #define fobj_errno_keys(errno) (errNo, errno), (errNoStr, ft_strerror(errno)) fobj_error_cstr_key(path); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 12187afea..b663c69ae 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -33,6 +33,7 @@ #include #endif +#include "extra.h" #include "utils/configuration.h" #include "utils/logger.h" #include "utils/remote.h" @@ -145,12 +146,6 @@ typedef struct RedoParams uint32 checksum_version; } RedoParams; -typedef struct PageState -{ - uint16 checksum; - XLogRecPtr lsn; -} PageState; - typedef struct db_map_entry { Oid dbOid; @@ -191,14 +186,6 @@ typedef enum RecoverySettingsMode * if not explicitly forbidden */ } RecoverySettingsMode; -typedef enum CompressAlg -{ - NOT_DEFINED_COMPRESS = 0, - NONE_COMPRESS, - PGLZ_COMPRESS, - ZLIB_COMPRESS, -} CompressAlg; - typedef enum ForkName { none, @@ -291,15 +278,6 @@ typedef enum BackupStatus BACKUP_STATUS_CORRUPT /* files are corrupted, not available */ } BackupStatus; -typedef enum BackupMode -{ - BACKUP_MODE_INVALID = 0, - BACKUP_MODE_DIFF_PAGE, /* incremental page backup */ - BACKUP_MODE_DIFF_PTRACK, /* incremental page backup with ptrack system */ - BACKUP_MODE_DIFF_DELTA, /* incremental page backup with lsn comparison */ - BACKUP_MODE_FULL /* full backup */ -} BackupMode; - typedef enum ShowFormat { SHOW_PLAIN, @@ -1051,8 +1029,7 @@ extern bool set_forkname(pgFile *file); extern void exclude_files(parray *files, bool 
backup_logs); /* in data.c */ -extern bool check_data_file(ConnectionArgs *arguments, pgFile *file, - const char *from_fullpath, uint32 checksum_version); +extern bool check_data_file(pgFile *file, const char *from_fullpath, uint32 checksum_version); extern void catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, @@ -1071,6 +1048,14 @@ extern void backup_non_data_file_internal(const char *from_fullpath, const char *to_fullpath, pgFile *file, bool missing_ok); +extern int32 prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, + BlockNumber blknum, FILE *in, + BackupMode backup_mode, + Page page, bool strict, + uint32 checksum_version, + const char *from_fullpath, + PageState *page_st); + extern size_t restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out, const char *to_fullpath, bool use_bitmap, PageState *checksum_map, XLogRecPtr shift_lsn, datapagemap_t *lsn_map, bool use_headers); @@ -1175,15 +1160,6 @@ extern parray * pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack /* open local file to writing */ extern FILE* open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 buf_size); -extern int send_pages(const char *to_fullpath, const char *from_fullpath, - pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, - uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers, - BackupMode backup_mode); -extern int copy_pages(const char *to_fullpath, const char *from_fullpath, - pgFile *file, XLogRecPtr prev_backup_start_lsn, - uint32 checksum_version, bool use_pagemap, - BackupMode backup_mode); - /* FIO */ extern int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, diff --git a/src/utils/file.c b/src/utils/file.c index 66faaf70e..ab3a38c80 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -84,6 +84,27 @@ typedef struct uint32 checksumVersion; } 
fio_lsn_map_request; +typedef struct +{ + size_t from_fullpath_len; + XLogRecPtr start_lsn; + CompressAlg calg; + int clevel; + uint32 checksum_version; + BackupMode backup_mode; + bool strict; + + int64_t file_size; + size_t file_rel_path_len; + size_t file_linked_len; + int file_segno; + bool file_exists_in_prev; + bool file_pagemap_isabsent; + size_t file_bitmapsize; +} fio_iterate_pages_request; + +static void fio_iterate_pages_impl(pioDrive_i drive, int out, const char *from_fullpath, + pgFile *file, fio_iterate_pages_request *params); /* Convert FIO pseudo handle to index in file descriptor array */ #define fio_fileno(f) (((size_t)f - 1) | FIO_PIPE_MARKER) @@ -540,7 +561,6 @@ fio_disconnect(void) if (fio_stdin) { fio_header hdr = (fio_header){.cop = FIO_DISCONNECT}; - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); Assert(hdr.cop == FIO_DISCONNECTED); @@ -2188,6 +2208,76 @@ fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, return n_blocks_read; } +static void +fio_send_pio_err(int out, err_i err) +{ + const char *err_msg = $errmsg(err); + fio_header hdr = {.cop = FIO_PIO_ERROR, .size = strlen(err_msg) + 1, .arg = getErrno(err)}; + + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(out, err_msg, hdr.size), hdr.size); + + /* We also need to send source location and all the KVs */ +} + +static err_i +fio_receive_pio_err(fio_header *hdr) +{ + int pio_errno = hdr->arg; + char *err_msg = pg_malloc(hdr->size); + + IO_CHECK(fio_read_all(fio_stdin, err_msg, hdr->size), hdr->size); + + return $syserr(pio_errno, err_msg); +} + +static void +fio_iterate_pages_impl(pioDrive_i drive, int out, const char *from_fullpath, + pgFile *file, fio_iterate_pages_request *params) +{ + pioPagesIterator_i pages; + err_i err = $noerr(); + fio_header hdr = {.cop=FIO_ITERATE_DATA}; + + pages = $i(pioIteratePages, drive, .from_fullpath 
= from_fullpath, .file = file, + .start_lsn = params->start_lsn, .calg = params->calg, .clevel = params->clevel, + .checksum_version = params->checksum_version, .backup_mode = params->backup_mode, + .strict = params->strict, .err = &err); + if ($haserr(err)) + { + fio_send_pio_err(out, err); + return; + } + ft_strbuf_t req = ft_strbuf_zero(); + while (true) + { + PageIteratorValue value; + + err_i err = $i(pioNextPage, pages, &value); + if ($haserr(err)) { + fio_send_pio_err(out, err); + return; + } + if (value.page_result == PageIsTruncated) + break; + + //send page + state + size_t value_size = sizeof(PageIteratorValue) - BLCKSZ + value.compressed_size; + + hdr.size = value_size; + + ft_strbuf_reset_for_reuse(&req); + ft_strbuf_catbytes(&req, ft_bytes(&hdr, sizeof(hdr))); + ft_strbuf_catbytes(&req, ft_bytes(&value, value_size)); + + IO_CHECK(fio_write_all(out, req.ptr, req.len), req.len); + } + ft_strbuf_free(&req); + + hdr = (fio_header){.cop = FIO_ITERATE_EOF}; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); +} + /* TODO: read file using large buffer * Return codes: * FIO_ERROR: @@ -4043,6 +4133,63 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, buf + filename_size, hdr.size), hdr.size); } break; + case FIO_ITERATE_PAGES: + { + ft_bytes_t bytes = {.ptr = buf, .len = hdr.size}; + + fio_iterate_pages_request *params; + char *from_fullpath = NULL; + char *rel_path = NULL; + char *linked = NULL; + pgFile *file = NULL; + + params = (fio_iterate_pages_request *)bytes.ptr; + ft_bytes_consume(&bytes, sizeof(fio_iterate_pages_request)); + + if (params->from_fullpath_len) + { + from_fullpath = bytes.ptr; + ft_bytes_consume(&bytes, params->from_fullpath_len); + } + + if (params->file_rel_path_len) + { + // free-d in pgFileFree + rel_path = pgut_malloc(params->file_rel_path_len); + memcpy(rel_path, bytes.ptr, params->file_rel_path_len); + ft_bytes_consume(&bytes, params->file_rel_path_len); + } + + if (params->file_linked_len) + { + // 
free-d in pgFileFree + linked = pgut_malloc(params->file_linked_len); + memcpy(linked, bytes.ptr, params->file_linked_len); + ft_bytes_consume(&bytes, params->file_linked_len); + } + + file = pgFileInit(rel_path); + + file->size = params->file_size; + file->segno = params->file_segno; + file->exists_in_prev = params->file_exists_in_prev; + file->pagemap_isabsent = params->file_pagemap_isabsent; + file->rel_path = rel_path; + file->linked = linked; + + file->pagemap.bitmapsize = params->file_bitmapsize; + if (params->file_bitmapsize) + { + file->pagemap.bitmap = pgut_malloc(params->file_bitmapsize); + memcpy(file->pagemap.bitmap, bytes.ptr, params->file_bitmapsize); + ft_bytes_consume(&bytes, params->file_bitmapsize); + } + + fio_iterate_pages_impl(drive, out, from_fullpath, file, params); + + pgFileFree(file); + } + break; default: Assert(false); } @@ -4653,6 +4800,23 @@ pioLocalFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) return r; } +static off_t +pioLocalFile_pioSeek(VSelf, off_t offs, err_i *err) +{ + Self(pioLocalFile); + fobj_reset_err(err); + + ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); + + off_t pos = lseek(self->fd, offs, SEEK_SET); + + if (pos == (off_t)-1) + { + *err = $syserr(errno, "Can not seek to {offs} in file {path:q}", offs(offs), path(self->p.path)); + } + return pos; +} + static err_i pioLocalFile_pioWriteFinish(VSelf) { @@ -5341,6 +5505,26 @@ pioRemoteFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) return buf.len; } +static off_t +pioRemoteFile_pioSeek(VSelf, off_t offs, err_i *err) +{ + Self(pioRemoteFile); + fio_header hdr; + + fobj_reset_err(err); + + ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); + + hdr.cop = FIO_SEEK; + hdr.handle = self->handle & ~FIO_PIPE_MARKER; + hdr.size = 0; + hdr.arg = offs; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + + return 0; +} + static err_i pioRemoteFile_pioWriteFinish(VSelf) { @@ -6248,6 +6432,258 @@ 
pio_line_reader_getline(pio_line_reader *r, err_i *err) goto retry; } +typedef struct pioRemotePagesIterator +{ + bool valid; +} pioRemotePagesIterator; + +typedef struct pioLocalPagesIterator +{ + bool valid; + BlockNumber blknum; + + bool strict; + pgFile *file; + datapagemap_iterator_t *map_iter; + FILE *in; + const char *from_fullpath; + XLogRecPtr start_lsn; + + CompressAlg calg; + int clevel; + BackupMode backup_mode; + uint32 checksum_version; +} pioLocalPagesIterator; + +#define kls__pioLocalPagesIterator iface__pioPagesIterator, iface(pioPagesIterator), \ + mth(fobjDispose) +fobj_klass(pioLocalPagesIterator); + +#define kls__pioRemotePagesIterator iface__pioPagesIterator, iface(pioPagesIterator) +fobj_klass(pioRemotePagesIterator); + +static pioPagesIterator_i +pioRemoteDrive_pioIteratePages(VSelf, path_t from_fullpath, pgFile *file, + XLogRecPtr start_lsn, CompressAlg calg, int clevel, + uint32 checksum_version, BackupMode backup_mode, + bool strict, err_i *err) +{ + Self(pioRemoteDrive); + fobj_t iter = {0}; + fio_header hdr = {.cop = FIO_ITERATE_PAGES}; + fio_iterate_pages_request params; + memset(¶ms, 0, sizeof(params)); + params = (fio_iterate_pages_request){ + .from_fullpath_len = strlen(from_fullpath)+1, + .start_lsn = start_lsn, + .calg = calg, + .clevel = clevel, + .checksum_version=checksum_version, + .backup_mode = backup_mode, + .strict=strict, + .file_size = file->size, + .file_rel_path_len = file->rel_path?strlen(file->rel_path)+1:0, + .file_linked_len = file->linked?strlen(file->linked)+1:0, + .file_segno = file->segno, + .file_exists_in_prev = file->exists_in_prev, + .file_pagemap_isabsent = file->pagemap_isabsent, + .file_bitmapsize = file->pagemap.bitmapsize + }; + + fobj_reset_err(err); + + if (file->size % BLCKSZ != 0) + elog(WARNING, "File: \"%s\", invalid file size %zu", from_fullpath, file->size); + + size_t total_size = sizeof(hdr) + sizeof(params) + params.from_fullpath_len + + params.file_rel_path_len + params.file_linked_len + 
params.file_bitmapsize; + + ft_strbuf_t req = ft_strbuf_zero(); + + hdr.size = total_size - sizeof(hdr); + + ft_strbuf_catbytes(&req, ft_bytes(&hdr, sizeof(hdr))); + + ft_strbuf_catbytes(&req, ft_bytes(¶ms, sizeof(fio_iterate_pages_request))); + if(params.from_fullpath_len) + ft_strbuf_catbytes(&req, ft_bytes((char *)from_fullpath, params.from_fullpath_len)); + if(params.file_rel_path_len) + ft_strbuf_catbytes(&req, ft_bytes(file->rel_path, params.file_rel_path_len)); + if(params.file_linked_len) + ft_strbuf_catbytes(&req, ft_bytes(file->linked, params.file_linked_len)); + if(params.file_bitmapsize) + ft_strbuf_catbytes(&req, ft_bytes(file->pagemap.bitmap, params.file_bitmapsize)); + + Assert(req.len == total_size); + Assert(!req.overflowed); + + IO_CHECK(fio_write_all(fio_stdout, req.ptr, req.len), req.len); + + ft_strbuf_free(&req); + + iter = $alloc(pioRemotePagesIterator, .valid = true); + + return bind_pioPagesIterator(iter); +} + +static err_i +pioRemotePagesIterator_pioNextPage(VSelf, PageIteratorValue *value) +{ + Self(pioRemotePagesIterator); + + fio_header hdr; + + if (!self->valid) { + value->page_result = PageIsTruncated; + return $noerr(); + } + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + if (hdr.cop == FIO_PIO_ERROR) + { + self->valid = false; + return fio_receive_pio_err(&hdr); + } + else if (hdr.cop == FIO_ITERATE_EOF) + { + self->valid = false; + value->page_result = PageIsTruncated; + return $noerr(); + } + else if (hdr.cop == FIO_ITERATE_DATA) + { + Assert(hdr.size <= sizeof(PageIteratorValue)); + memset(value, 0, sizeof(PageIteratorValue)); + IO_CHECK(fio_read_all(fio_stdin, (void*)value, hdr.size), hdr.size); + + return $noerr(); + } + self->valid = false; + return $err(RT, "Unexpected operation {intCode} in remote pioNextPage", + intCode(hdr.cop)); +} + +static pioPagesIterator_i +pioLocalDrive_pioIteratePages(VSelf, path_t from_fullpath, pgFile *file, + XLogRecPtr start_lsn, CompressAlg calg, int clevel, + uint32 
checksum_version, BackupMode backup_mode, + bool strict, err_i *err) +{ + Self(pioLocalDrive); + fobj_t iter = {0}; + bool use_pagemap; + datapagemap_iterator_t *map_iter = NULL; + FILE *in; + + fobj_reset_err(err); + + if (file->size % BLCKSZ != 0) + elog(WARNING, "File: \"%s\", invalid file size %zu", from_fullpath, file->size); + + /* + * Compute expected number of blocks in the file. + * NOTE This is a normal situation, if the file size has changed + * since the moment we computed it. + */ + file->n_blocks = ft_div_i64u32_to_i32(file->size, BLCKSZ); + + /* + * If page map is empty or file is not present in destination directory, + * then copy backup all pages of the relation. + */ + + if (file->pagemap.bitmapsize == PageBitmapIsEmpty || + file->pagemap_isabsent || !file->exists_in_prev || + !file->pagemap.bitmap) + use_pagemap = false; + else + use_pagemap = true; + + if (use_pagemap) + elog(LOG, "Using pagemap for file \"%s\"", file->rel_path); + + in = fopen(from_fullpath, PG_BINARY_R); + if (!in) + { + pioPagesIterator_i ret = {0}; + *err = $syserr(errno, "Cannot iterate pages"); + return ret; + } + + BlockNumber blknum; + if (use_pagemap) + { + map_iter = datapagemap_iterate(&file->pagemap); + blknum = 0; + } else { + blknum = -1; + } + + iter = $alloc(pioLocalPagesIterator, .valid = true, .blknum = blknum, .strict = strict, + .file = file, .from_fullpath = from_fullpath, .map_iter = map_iter, + .in = in, .start_lsn = start_lsn, .calg = calg, .clevel = clevel, .backup_mode = backup_mode, + .checksum_version = checksum_version); + + return bind_pioPagesIterator(iter); +} + +static void +pioLocalPagesIterator_fobjDispose(VSelf) +{ + Self(pioLocalPagesIterator); + + if (self->map_iter) { + /* Only free iterator. Map itself is not owned by us. 
*/ + pg_free(self->map_iter); + } + if(self->in) fclose(self->in); +} + +static err_i +pioLocalPagesIterator_pioNextPage(VSelf, PageIteratorValue *value) +{ + FOBJ_FUNC_ARP(); + Self(pioLocalPagesIterator); + + while (self->valid) + { + char page_buf[BLCKSZ]; + + /* next block */ + if (self->map_iter) + { + /* exit if pagemap is exhausted */ + if (!datapagemap_next(self->map_iter, &(self->blknum))) + break; + } + else + self->blknum++; + + if (self->blknum >= self->file->n_blocks) + break; + + int rc = prepare_page(self->file, self->start_lsn, self->blknum, + self->in, self->backup_mode, page_buf, self->strict, + self->checksum_version, self->from_fullpath, &value->state); + value->blknum = self->blknum; + value->page_result = rc; + if (rc == PageIsTruncated) + break; + if (rc == PageIsOk) + { + value->compressed_size = compress_page(value->compressed_page, BLCKSZ, + value->blknum, page_buf, self->calg, + self->clevel, self->from_fullpath); + } + return $noerr(); + } + value->page_result = PageIsTruncated; + self->valid = false; + return $noerr(); +} + +fobj_klass_handle(pioLocalPagesIterator); +fobj_klass_handle(pioRemotePagesIterator); + fobj_klass_handle(pioFile); fobj_klass_handle(pioLocalDrive); fobj_klass_handle(pioRemoteDrive); diff --git a/src/utils/file.h b/src/utils/file.h index 8a949fd38..243e7f187 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -71,6 +71,10 @@ typedef enum FIO_FILES_ARE_SAME, FIO_READ_FILE_AT_ONCE, FIO_WRITE_FILE_AT_ONCE, + FIO_PIO_ERROR, /* for sending err_i */ + FIO_ITERATE_PAGES, + FIO_ITERATE_DATA, + FIO_ITERATE_EOF, } fio_operations; typedef struct @@ -198,8 +202,7 @@ extern void db_list_dir(parray *files, const char *root, bool handle_tablespaces bool backup_logs, int external_dir_num); extern void backup_list_dir(parray *files, const char *root); -struct PageState; /* defined in pg_probackup.h */ -extern struct PageState *fio_get_checksum_map(fio_location location, const char *fullpath, uint32 checksum_version, +extern 
PageState *fio_get_checksum_map(fio_location location, const char *fullpath, uint32 checksum_version, int n_blocks, XLogRecPtr dest_stop_lsn, BlockNumber segmentno); struct datapagemap; /* defined in datapagemap.h */ extern struct datapagemap *fio_get_lsn_map(fio_location location, const char *fullpath, uint32 checksum_version, @@ -228,6 +231,7 @@ fobj_error_int_key(writtenSz); fobj_error_int_key(wantedSz); fobj_error_int_key(size); fobj_error_cstr_key(kind); +fobj_error_int_key(offs); #ifdef HAVE_LIBZ fobj_error_kind(GZ); @@ -242,13 +246,16 @@ fobj_error_cstr_key(gzErrStr); #define mth__pioWrite size_t, (ft_bytes_t, buf), (err_i *, err) #define mth__pioTruncate err_i, (size_t, sz) #define mth__pioWriteFinish err_i +#define mth__pioSeek off_t, (off_t, offs), (err_i *, err) + fobj_method(pioClose); fobj_method(pioRead); fobj_method(pioWrite); fobj_method(pioTruncate); fobj_method(pioWriteFinish); +fobj_method(pioSeek); -#define iface__pioFile mth(pioWrite, pioWriteFinish, pioRead, pioTruncate, pioClose) +#define iface__pioFile mth(pioWrite, pioWriteFinish, pioRead, pioTruncate, pioClose, pioSeek) #define iface__pioWriteFlush mth(pioWrite, pioWriteFinish) #define iface__pioWriteCloser mth(pioWrite, pioWriteFinish, pioClose) #define iface__pioReadCloser mth(pioRead, pioClose) @@ -257,6 +264,25 @@ fobj_iface(pioWriteFlush); fobj_iface(pioWriteCloser); fobj_iface(pioReadCloser); +// Pages iterator +typedef struct +{ + PageState state; + BlockNumber blknum; + int page_result; + int compression; + size_t compressed_size; + char compressed_page[BLCKSZ]; /* MUST be last */ +} PageIteratorValue; + +#define mth__pioNextPage err_i, (PageIteratorValue *, value) + +fobj_method(pioNextPage); + +#define iface__pioPagesIterator mth(pioNextPage) + +fobj_iface(pioPagesIterator); + // Drive #define mth__pioOpen pioFile_i, (path_t, path), (int, flags), \ (int, permissions), (err_i *, err) @@ -284,6 +310,12 @@ fobj_iface(pioReadCloser); #define mth__pioWriteFile err_i, (path_t, path), 
(ft_bytes_t, content), (bool, binary) #define mth__pioWriteFile__optional() (binary, true) +#define mth__pioIteratePages pioPagesIterator_i, (path_t, from_fullpath), \ + (pgFile *, file), (XLogRecPtr, start_lsn), (CompressAlg, calg), (int, clevel), \ + (uint32, checksum_version), (BackupMode, backup_mode), (bool, strict), \ + (err_i *, err) +fobj_method(pioIteratePages); + fobj_method(pioOpen); fobj_method(pioStat); fobj_method(pioRemove); @@ -301,7 +333,8 @@ fobj_method(pioWriteFile); #define iface__pioDrive mth(pioOpen, pioStat, pioRemove, pioRename), \ mth(pioExists, pioGetCRC32, pioIsRemote), \ mth(pioMakeDir, pioListDir, pioRemoveDir), \ - mth(pioFilesAreSame, pioReadFile, pioWriteFile) + mth(pioFilesAreSame, pioReadFile, pioWriteFile), \ + mth(pioIteratePages) fobj_iface(pioDrive); extern pioDrive_i pioDriveForLocation(fio_location location); diff --git a/tests/backup_test.py b/tests/backup_test.py index 4d348ecee..b8c15ff66 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -1721,7 +1721,7 @@ def test_basic_missing_file_permissions(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: Cannot open file', + 'ERROR: send_pages: Cannot iterate pages: Permission denied', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) From 9c0dd85c1fde410e5cffdde9d8edcaff184e474a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Dec 2022 10:03:27 +0300 Subject: [PATCH 140/339] [PBCKP-338] simplify backup_page and its usage. 
And use pioCRC32Counter to calculate crc32 --- src/data.c | 74 ++++++++++++++++++++++++++---------------------------- 1 file changed, 35 insertions(+), 39 deletions(-) diff --git a/src/data.c b/src/data.c index 0039a6df8..e66006c1f 100644 --- a/src/data.c +++ b/src/data.c @@ -450,41 +450,24 @@ compress_page(char *write_buffer, size_t buffer_size, BlockNumber blknum, void * } static int -backup_page(pgFile *file, BlockNumber blknum, pioFile_i out, - pg_crc32 *crc, void *compressed_page, size_t compressed_size, CompressAlg calg, int clevel, - const char *from_fullpath, const char *to_fullpath) +backup_page(pioWrite_i out, BlockNumber blknum, ft_bytes_t page, const char *to_fullpath) { - size_t write_buffer_size = 0; - char write_buffer[BLCKSZ*2]; /* compressed page may require more space than uncompressed */ - BackupPageHeader* bph = (BackupPageHeader*)write_buffer; + BackupPageHeader bph; err_i err = $noerr(); - size_t rc; - - memcpy(write_buffer + sizeof(BackupPageHeader), compressed_page, compressed_size); - file->compress_alg = calg; /* TODO: wtf? why here? 
*/ + bph.block = blknum; + bph.compressed_size = page.len; - bph->block = blknum; - bph->compressed_size = compressed_size; - write_buffer_size = compressed_size + sizeof(BackupPageHeader); - - /* Update CRC */ - COMP_CRC32C(*crc, write_buffer, write_buffer_size); + $i(pioWrite, out, .buf = ft_bytes(&bph, sizeof(bph)), .err = &err); + if ($haserr(err)) + ft_logerr(ERROR, $errmsg(err), "Write page header in backup_page"); /* write data page */ - rc = $i(pioWrite, out, .buf = ft_bytes(write_buffer, write_buffer_size), .err = &err); + $i(pioWrite, out, .buf = page, .err = &err); if ($haserr(err)) - { ft_logerr(ERROR, $errmsg(err), "Write error in compress and backup"); - } - if (rc != write_buffer_size) - elog(ERROR, "File: \"%s\", cannot write at block %u: %s", - to_fullpath, blknum, strerror(errno)); - - file->write_size += write_buffer_size; - file->uncompressed_size += BLCKSZ; - return compressed_size; + return sizeof(bph) + page.len; } /* Write page as-is. TODO: make it fastpath option in compress_and_backup_page() */ @@ -576,7 +559,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat file->read_size = 0; file->write_size = 0; file->uncompressed_size = 0; - INIT_CRC32C(file->crc); + file->crc = 0; /* crc of empty file is 0 */ /* * Read each page, verify checksum and write it to backup. 
@@ -653,9 +636,6 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat cleanup: - /* finish CRC calculation */ - FIN_CRC32C(file->crc); - /* dump page headers */ write_page_headers(headers, file, hdr_map, is_merge); @@ -2019,8 +1999,8 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, pioPagesIterator_i pages; int n_blocks_read = 0; pioFile_i out = $null(pioFile); - off_t cur_pos_out = 0; - int compressed_size = 0; + pioWriteFlush_i wrapped = $null(pioWriteFlush); + pioCRC32Counter *crc32 = NULL; BackupPageHeader2 *header = NULL; parray *harray = NULL; err_i err = $noerr(); @@ -2057,21 +2037,27 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, { ft_logerr(FT_FATAL, $errmsg(err), "Cannot write file"); } + crc32 = pioCRC32Counter_alloc(); + wrapped = pioWrapWriteFilter($reduce(pioWriteFlush, out), + $bind(pioFilter, crc32), + BLCKSZ + sizeof(BackupPageHeader)); + file->compress_alg = calg; } header = pgut_new0(BackupPageHeader2); *header = (BackupPageHeader2){ .block = value.blknum, - .pos = cur_pos_out, + .pos = file->write_size, .lsn = value.state.lsn, .checksum = value.state.checksum, }; parray_append(harray, header); - compressed_size = backup_page(file, value.blknum, out, &(file->crc), - value.compressed_page, value.compressed_size, calg, clevel, - from_fullpath, to_fullpath); - cur_pos_out += compressed_size + sizeof(BackupPageHeader); + file->write_size += backup_page($reduce(pioWrite, wrapped), value.blknum, + ft_bytes(value.compressed_page, value.compressed_size), + to_fullpath); + file->uncompressed_size += BLCKSZ; + } n_blocks_read++; } @@ -2093,13 +2079,23 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, (*headers)[i] = *header; pg_free(header); } - (*headers)[hdr_num] = (BackupPageHeader2){.pos=cur_pos_out}; + (*headers)[hdr_num] = (BackupPageHeader2){.pos=file->write_size}; } parray_free(harray); /* close local output file */ if 
($notNULL(out)) - $i(pioClose, out, true); + { + err = $i(pioWriteFinish, wrapped); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Finish write backup file"); + file->crc = pioCRC32Counter_getCRC32(crc32); + ft_dbg_assert(file->write_size == pioCRC32Counter_getSize(crc32)); + + err = $i(pioClose, out, true); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Close write backup file"); + } return n_blocks_read; } From 1cb72dc320159fb39e179fc81ce27578d6196c2c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Dec 2022 11:45:54 +0300 Subject: [PATCH 141/339] [PBCKP-338] move `prepare_page` to file.c, since it is single call place. --- src/data.c | 153 ------------------------------------------ src/pg_probackup.h | 8 --- src/utils/file.c | 162 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 162 insertions(+), 161 deletions(-) diff --git a/src/data.c b/src/data.c index e66006c1f..ee32be490 100644 --- a/src/data.c +++ b/src/data.c @@ -268,159 +268,6 @@ get_checksum_errormsg(Page page, char **errormsg, BlockNumber absolute_blkno) pg_checksum_page(page, absolute_blkno)); } -/* - * Retrieves a page taking the backup mode into account - * and writes it into argument "page". Argument "page" - * should be a pointer to allocated BLCKSZ of bytes. - * - * Prints appropriate warnings/errors/etc into log. 
- * Returns: - * PageIsOk(0) if page was successfully retrieved - * PageIsTruncated(-1) if the page was truncated - * SkipCurrentPage(-2) if we need to skip this page, - * only used for DELTA and PTRACK backup - * PageIsCorrupted(-3) if the page checksum mismatch - * or header corruption, - * only used for checkdb - * TODO: probably we should always - * return it to the caller - */ -int32 -prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, - BlockNumber blknum, FILE *in, - BackupMode backup_mode, - Page page, bool strict, - uint32 checksum_version, - const char *from_fullpath, - PageState *page_st) -{ - int try_again = PAGE_READ_ATTEMPTS; - bool page_is_valid = false; - BlockNumber absolute_blknum = file->segno * RELSEG_SIZE + blknum; - int rc = 0; - - /* check for interrupt */ - if (interrupted || thread_interrupted) - elog(ERROR, "Interrupted during page reading"); - - /* - * Read the page and verify its header and checksum. - * Under high write load it's possible that we've read partly - * flushed page, so try several times before throwing an error. - */ - while (!page_is_valid && try_again--) - { - /* read the block */ - int read_len = fio_pread(in, page, blknum * BLCKSZ); - - /* The block could have been truncated. It is fine. 
*/ - if (read_len == 0) - { - elog(VERBOSE, "Cannot read block %u of \"%s\": " - "block truncated", blknum, from_fullpath); - return PageIsTruncated; - } - else if (read_len < 0) - elog(ERROR, "Cannot read block %u of \"%s\": %s", - blknum, from_fullpath, strerror(errno)); - else if (read_len != BLCKSZ) - elog(WARNING, "Cannot read block %u of \"%s\": " - "read %i of %d, try again", - blknum, from_fullpath, read_len, BLCKSZ); - else - { - /* We have BLCKSZ of raw data, validate it */ - rc = validate_one_page(page, absolute_blknum, - InvalidXLogRecPtr, page_st, - checksum_version); - switch (rc) - { - case PAGE_IS_ZEROED: - elog(VERBOSE, "File: \"%s\" blknum %u, empty page", from_fullpath, blknum); - return PageIsOk; - - case PAGE_IS_VALID: - /* in DELTA or PTRACK modes we must compare lsn */ - if (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) - page_is_valid = true; - else - return PageIsOk; - break; - - case PAGE_HEADER_IS_INVALID: - elog(VERBOSE, "File: \"%s\" blknum %u have wrong page header, try again", - from_fullpath, blknum); - break; - - case PAGE_CHECKSUM_MISMATCH: - elog(VERBOSE, "File: \"%s\" blknum %u have wrong checksum, try again", - from_fullpath, blknum); - break; - default: - Assert(false); - } - } - /* avoid re-reading once buffered data, flushing on further attempts, see PBCKP-150 */ - fflush(in); - } - - /* - * If page is not valid after PAGE_READ_ATTEMPTS attempts to read it - * throw an error. 
- */ - if (!page_is_valid) - { - int elevel = ERROR; - char *errormsg = NULL; - - /* Get the details of corruption */ - if (rc == PAGE_HEADER_IS_INVALID) - get_header_errormsg(page, &errormsg); - else if (rc == PAGE_CHECKSUM_MISMATCH) - get_checksum_errormsg(page, &errormsg, - file->segno * RELSEG_SIZE + blknum); - - /* Error out in case of merge or backup without ptrack support; - * issue warning in case of checkdb or backup with ptrack support - */ - if (!strict) - elevel = WARNING; - - if (errormsg) - elog(elevel, "Corruption detected in file \"%s\", block %u: %s", - from_fullpath, blknum, errormsg); - else - elog(elevel, "Corruption detected in file \"%s\", block %u", - from_fullpath, blknum); - - pg_free(errormsg); - return PageIsCorrupted; - } - - /* Checkdb not going futher */ - if (!strict) - return PageIsOk; - - /* - * Skip page if page lsn is less than START_LSN of parent backup. - * Nullified pages must be copied by DELTA backup, just to be safe. - */ - if ((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && - file->exists_in_prev && - page_st->lsn > 0 && - page_st->lsn < prev_backup_start_lsn) - { - elog(VERBOSE, "Skipping blknum %u in file: \"%s\", file->exists_in_prev: %s, page_st->lsn: %X/%X, prev_backup_start_lsn: %X/%X", - blknum, from_fullpath, - file->exists_in_prev ? 
"true" : "false", - (uint32) (page_st->lsn >> 32), (uint32) page_st->lsn, - (uint32) (prev_backup_start_lsn >> 32), (uint32) prev_backup_start_lsn); - return SkipCurrentPage; - } - - return PageIsOk; -} - int compress_page(char *write_buffer, size_t buffer_size, BlockNumber blknum, void *page, CompressAlg calg, int clevel, const char *from_fullpath) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index b663c69ae..5faf49b7b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1048,14 +1048,6 @@ extern void backup_non_data_file_internal(const char *from_fullpath, const char *to_fullpath, pgFile *file, bool missing_ok); -extern int32 prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, - BlockNumber blknum, FILE *in, - BackupMode backup_mode, - Page page, bool strict, - uint32 checksum_version, - const char *from_fullpath, - PageState *page_st); - extern size_t restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out, const char *to_fullpath, bool use_bitmap, PageState *checksum_map, XLogRecPtr shift_lsn, datapagemap_t *lsn_map, bool use_headers); diff --git a/src/utils/file.c b/src/utils/file.c index ab3a38c80..6ae48f54d 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -121,6 +121,14 @@ static void dir_list_file_internal(parray *files, pgFile *parent, const char *pa bool handle_tablespaces, bool follow_symlink, bool backup_logs, bool skip_hidden, int external_dir_num, pioDrive_i drive); +static int32 prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, + BlockNumber blknum, FILE *in, + BackupMode backup_mode, + Page page, bool strict, + uint32 checksum_version, + const char *from_fullpath, + PageState *page_st); + void setMyLocation(ProbackupSubcmd const subcmd) { @@ -6681,6 +6689,160 @@ pioLocalPagesIterator_pioNextPage(VSelf, PageIteratorValue *value) return $noerr(); } +/* + * Retrieves a page taking the backup mode into account + * and writes it into argument "page". 
Argument "page" + * should be a pointer to allocated BLCKSZ of bytes. + * + * Prints appropriate warnings/errors/etc into log. + * Returns: + * PageIsOk(0) if page was successfully retrieved + * PageIsTruncated(-1) if the page was truncated + * SkipCurrentPage(-2) if we need to skip this page, + * only used for DELTA and PTRACK backup + * PageIsCorrupted(-3) if the page checksum mismatch + * or header corruption, + * only used for checkdb + * TODO: probably we should always + * return it to the caller + */ +int32 +prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, + BlockNumber blknum, FILE *in, + BackupMode backup_mode, + Page page, bool strict, + uint32 checksum_version, + const char *from_fullpath, + PageState *page_st) +{ + int try_again = PAGE_READ_ATTEMPTS; + bool page_is_valid = false; + BlockNumber absolute_blknum = file->segno * RELSEG_SIZE + blknum; + int rc = 0; + + /* check for interrupt */ + if (interrupted || thread_interrupted) + elog(ERROR, "Interrupted during page reading"); + + /* + * Read the page and verify its header and checksum. + * Under high write load it's possible that we've read partly + * flushed page, so try several times before throwing an error. + */ + while (!page_is_valid && try_again--) + { + /* read the block */ + int read_len = fio_pread(in, page, blknum * BLCKSZ); + + /* The block could have been truncated. It is fine. 
*/ + if (read_len == 0) + { + elog(VERBOSE, "Cannot read block %u of \"%s\": " + "block truncated", blknum, from_fullpath); + return PageIsTruncated; + } + else if (read_len < 0) + elog(ERROR, "Cannot read block %u of \"%s\": %s", + blknum, from_fullpath, strerror(errno)); + else if (read_len != BLCKSZ) + elog(WARNING, "Cannot read block %u of \"%s\": " + "read %i of %d, try again", + blknum, from_fullpath, read_len, BLCKSZ); + else + { + /* We have BLCKSZ of raw data, validate it */ + rc = validate_one_page(page, absolute_blknum, + InvalidXLogRecPtr, page_st, + checksum_version); + switch (rc) + { + case PAGE_IS_ZEROED: + elog(VERBOSE, "File: \"%s\" blknum %u, empty page", from_fullpath, blknum); + return PageIsOk; + + case PAGE_IS_VALID: + /* in DELTA or PTRACK modes we must compare lsn */ + if (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) + page_is_valid = true; + else + return PageIsOk; + break; + + case PAGE_HEADER_IS_INVALID: + elog(VERBOSE, "File: \"%s\" blknum %u have wrong page header, try again", + from_fullpath, blknum); + break; + + case PAGE_CHECKSUM_MISMATCH: + elog(VERBOSE, "File: \"%s\" blknum %u have wrong checksum, try again", + from_fullpath, blknum); + break; + default: + Assert(false); + } + } + /* avoid re-reading once buffered data, flushing on further attempts, see PBCKP-150 */ + fflush(in); + } + + /* + * If page is not valid after PAGE_READ_ATTEMPTS attempts to read it + * throw an error. 
+ */ + if (!page_is_valid) + { + int elevel = ERROR; + char *errormsg = NULL; + + /* Get the details of corruption */ + if (rc == PAGE_HEADER_IS_INVALID) + get_header_errormsg(page, &errormsg); + else if (rc == PAGE_CHECKSUM_MISMATCH) + get_checksum_errormsg(page, &errormsg, + file->segno * RELSEG_SIZE + blknum); + + /* Error out in case of merge or backup without ptrack support; + * issue warning in case of checkdb or backup with ptrack support + */ + if (!strict) + elevel = WARNING; + + if (errormsg) + elog(elevel, "Corruption detected in file \"%s\", block %u: %s", + from_fullpath, blknum, errormsg); + else + elog(elevel, "Corruption detected in file \"%s\", block %u", + from_fullpath, blknum); + + pg_free(errormsg); + return PageIsCorrupted; + } + + /* Checkdb not going futher */ + if (!strict) + return PageIsOk; + + /* + * Skip page if page lsn is less than START_LSN of parent backup. + * Nullified pages must be copied by DELTA backup, just to be safe. + */ + if ((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && + file->exists_in_prev && + page_st->lsn > 0 && + page_st->lsn < prev_backup_start_lsn) + { + elog(VERBOSE, "Skipping blknum %u in file: \"%s\", file->exists_in_prev: %s, page_st->lsn: %X/%X, prev_backup_start_lsn: %X/%X", + blknum, from_fullpath, + file->exists_in_prev ? 
"true" : "false", + (uint32) (page_st->lsn >> 32), (uint32) page_st->lsn, + (uint32) (prev_backup_start_lsn >> 32), (uint32) prev_backup_start_lsn); + return SkipCurrentPage; + } + + return PageIsOk; +} + + fobj_klass_handle(pioLocalPagesIterator); fobj_klass_handle(pioRemotePagesIterator); From b765af4059d0a8087dd88a436627e1d5d5808586 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Dec 2022 11:52:48 +0300 Subject: [PATCH 142/339] [PBCKP-338] Some simplifications - format pioLocalPagesIterator allocation - reduce prepare_page signature - simplify send_pages and its usage - same for copy_pages - simplify iterator initialization --- src/data.c | 204 ++++++++++++++----------------------------- src/datapagemap.c | 31 ++++++- src/datapagemap.h | 1 + src/utils/file.c | 117 ++++++++++++------------- src/utils/file.h | 1 + tests/backup_test.py | 15 +--- 6 files changed, 152 insertions(+), 217 deletions(-) diff --git a/src/data.c b/src/data.c index ee32be490..32a44be15 100644 --- a/src/data.c +++ b/src/data.c @@ -36,12 +36,12 @@ typedef struct DataPage static bool get_page_header(FILE *in, const char *fullpath, BackupPageHeader *bph, pg_crc32 *crc, uint32 backup_version); -static int send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, +static err_i send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, uint32 checksum_version, BackupPageHeader2 **headers, BackupMode backup_mode); -static int copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, +static err_i copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr sync_lsn, uint32 checksum_version, BackupMode backup_mode); @@ -296,25 +296,23 @@ compress_page(char *write_buffer, size_t buffer_size, BlockNumber blknum, void * return compressed_size; } -static int -backup_page(pioWrite_i out, BlockNumber blknum, ft_bytes_t page, const char *to_fullpath) 
+static size_t +backup_page(pioWrite_i out, BlockNumber blknum, ft_bytes_t page, + const char *to_fullpath, err_i *err) { BackupPageHeader bph; - err_i err = $noerr(); + size_t n; + fobj_reset_err(err); bph.block = blknum; bph.compressed_size = page.len; - $i(pioWrite, out, .buf = ft_bytes(&bph, sizeof(bph)), .err = &err); - if ($haserr(err)) - ft_logerr(ERROR, $errmsg(err), "Write page header in backup_page"); + n = $i(pioWrite, out, .buf = ft_bytes(&bph, sizeof(bph)), .err = err); + if ($haserr(*err)) + return n; /* write data page */ - $i(pioWrite, out, .buf = page, .err = &err); - if ($haserr(err)) - ft_logerr(ERROR, $errmsg(err), "Write error in compress and backup"); - - return sizeof(bph) + page.len; + return n + $i(pioWrite, out, .buf = page, .err = err); } /* Write page as-is. TODO: make it fastpath option in compress_and_backup_page() */ @@ -367,11 +365,9 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat CompressAlg calg, int clevel, uint32 checksum_version, HeaderMap *hdr_map, bool is_merge) { - int rc; - char *errmsg = NULL; - BlockNumber err_blknum = 0; /* page headers */ BackupPageHeader2 *headers = NULL; + err_i err = $noerr(); /* sanity */ if (file->size % BLCKSZ != 0) @@ -422,49 +418,22 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat XLogRecPtr start_lsn = (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr; /* TODO: stop handling errors internally */ - rc = send_pages(to_fullpath, from_fullpath, file, start_lsn, + err = send_pages(to_fullpath, from_fullpath, file, start_lsn, calg, clevel, checksum_version, &headers, backup_mode); - /* check for errors */ - if (rc == FILE_MISSING) - { - elog(is_merge ? 
ERROR : LOG, "File not found: \"%s\"", from_fullpath); - file->write_size = FILE_NOT_FOUND; - goto cleanup; - } - - else if (rc == WRITE_FAILED) - elog(ERROR, "Cannot write block %u of \"%s\": %s", - err_blknum, to_fullpath, strerror(errno)); - - else if (rc == PAGE_CORRUPTION) - { - if (errmsg) - elog(ERROR, "Corruption detected in file \"%s\", block %u: %s", - from_fullpath, err_blknum, errmsg); - else - elog(ERROR, "Corruption detected in file \"%s\", block %u", - from_fullpath, err_blknum); - } - /* OPEN_FAILED and READ_FAILED */ - else if (rc == OPEN_FAILED) - { - if (errmsg) - elog(ERROR, "%s", errmsg); - else - elog(ERROR, "Cannot open file \"%s\"", from_fullpath); - } - else if (rc == READ_FAILED) + if ($haserr(err)) { - if (errmsg) - elog(ERROR, "%s", errmsg); - else - elog(ERROR, "Cannot read file \"%s\"", from_fullpath); + if (getErrno(err) == ENOENT) + { + elog(is_merge ? ERROR : LOG, "File not found: \"%s\"", + from_fullpath); + file->write_size = FILE_NOT_FOUND; + goto cleanup; + } + ft_logerr(FT_FATAL, $errmsg(err), "Copying data file \"%s\"", file->rel_path); } - file->read_size = (int64_t)rc * BLCKSZ; - /* refresh n_blocks for FULL and DELTA */ if (backup_mode == BACKUP_MODE_FULL || backup_mode == BACKUP_MODE_DIFF_DELTA) @@ -486,7 +455,6 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat /* dump page headers */ write_page_headers(headers, file, hdr_map, is_merge); - pg_free(errmsg); pg_free(file->pagemap.bitmap); pg_free(headers); } @@ -503,9 +471,7 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa XLogRecPtr sync_lsn, BackupMode backup_mode, uint32 checksum_version, int64_t prev_size) { - int rc; - char *errmsg = NULL; - BlockNumber err_blknum = 0; + err_i err = $noerr(); /* * Compute expected number of blocks in the file. 
@@ -539,48 +505,19 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa XLogRecPtr start_lsn = ((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && file->exists_in_prev) ? sync_lsn : InvalidXLogRecPtr; /* TODO: stop handling errors internally */ - rc = copy_pages(to_fullpath, from_fullpath, file, start_lsn, + err = copy_pages(to_fullpath, from_fullpath, file, start_lsn, checksum_version, backup_mode); - - /* check for errors */ - if (rc == FILE_MISSING) - { - elog(LOG, "File not found: \"%s\"", from_fullpath); - file->write_size = FILE_NOT_FOUND; - goto cleanup; - } - - else if (rc == WRITE_FAILED) - elog(ERROR, "Cannot write block %u of \"%s\": %s", - err_blknum, to_fullpath, strerror(errno)); - - else if (rc == PAGE_CORRUPTION) - { - if (errmsg) - elog(ERROR, "Corruption detected in file \"%s\", block %u: %s", - from_fullpath, err_blknum, errmsg); - else - elog(ERROR, "Corruption detected in file \"%s\", block %u", - from_fullpath, err_blknum); - } - /* OPEN_FAILED and READ_FAILED */ - else if (rc == OPEN_FAILED) - { - if (errmsg) - elog(ERROR, "%s", errmsg); - else - elog(ERROR, "Cannot open file \"%s\"", from_fullpath); - } - else if (rc == READ_FAILED) + if ($haserr(err)) { - if (errmsg) - elog(ERROR, "%s", errmsg); - else - elog(ERROR, "Cannot read file \"%s\"", from_fullpath); + if (getErrno(err) == ENOENT) + { + elog(LOG, "File not found: \"%s\"", from_fullpath); + file->write_size = FILE_NOT_FOUND; + goto cleanup; + } + ft_logerr(FT_FATAL, $errmsg(err), "Copying file \"%s\"", file->rel_path); } - file->read_size = (int64_t)rc * BLCKSZ; - /* Determine that file didn`t changed in case of incremental catchup */ if (backup_mode != BACKUP_MODE_FULL && file->exists_in_prev && @@ -591,7 +528,6 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa } cleanup: - pg_free(errmsg); pg_free(file->pagemap.bitmap); } @@ -1834,7 +1770,7 @@ open_local_file_rw(const char 
*to_fullpath, char **out_buf, uint32 buf_size) } /* backup local file */ -static int +static err_i send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, uint32 checksum_version, BackupPageHeader2 **headers, @@ -1844,7 +1780,6 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); pioDrive_i db_location = pioDriveForLocation(FIO_DB_HOST); pioPagesIterator_i pages; - int n_blocks_read = 0; pioFile_i out = $null(pioFile); pioWriteFlush_i wrapped = $null(pioWriteFlush); pioCRC32Counter *crc32 = NULL; @@ -1857,22 +1792,15 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, .checksum_version = checksum_version, .backup_mode = backup_mode, .strict = true, .err = &err); if ($haserr(err)) - { - if (getErrno(err) == ENOENT) - return FILE_MISSING; - ft_logerr(FT_FATAL, $errmsg(err), "send_pages"); - return OPEN_FAILED; - } + return $iresult(err); harray = parray_new(); while (true) { PageIteratorValue value; err_i err = $i(pioNextPage, pages, &value); - if ($haserr(err)) { - ft_logerr(FT_FATAL, $errmsg(err), "sending data file pages"); - return READ_FAILED; - } + if ($haserr(err)) + return $iresult(err); if (value.page_result == PageIsTruncated) break; @@ -1881,9 +1809,7 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, { out = $i(pioOpen, backup_location, to_fullpath, PG_BINARY|O_CREAT|O_RDWR, 0, &err); if ($haserr(err)) - { - ft_logerr(FT_FATAL, $errmsg(err), "Cannot write file"); - } + return $iresult(err); crc32 = pioCRC32Counter_alloc(); wrapped = pioWrapWriteFilter($reduce(pioWriteFlush, out), $bind(pioFilter, crc32), @@ -1900,13 +1826,21 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, }; parray_append(harray, header); + file->uncompressed_size += BLCKSZ; file->write_size += backup_page($reduce(pioWrite, 
wrapped), value.blknum, ft_bytes(value.compressed_page, value.compressed_size), - to_fullpath); - file->uncompressed_size += BLCKSZ; + to_fullpath, &err); + if ($haserr(err)) + return $iresult(err); + } + if (value.page_result == PageIsCorrupted) + { + err = $err(RT, "Page %d is corrupted", + blknum(value.blknum)); + return $iresult(err); } - n_blocks_read++; + file->read_size += BLCKSZ; } /* @@ -1935,29 +1869,28 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, { err = $i(pioWriteFinish, wrapped); if ($haserr(err)) - ft_logerr(FT_FATAL, $errmsg(err), "Finish write backup file"); + return $iresult(err); file->crc = pioCRC32Counter_getCRC32(crc32); ft_dbg_assert(file->write_size == pioCRC32Counter_getSize(crc32)); err = $i(pioClose, out, true); if ($haserr(err)) - ft_logerr(FT_FATAL, $errmsg(err), "Close write backup file"); + return $iresult(err); } - return n_blocks_read; + return $noerr(); } /* * Copy data file just as send_pages but without attaching additional header and compression */ -static int +static err_i copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr sync_lsn, uint32 checksum_version, BackupMode backup_mode) { FOBJ_FUNC_ARP(); pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); - int n_blocks_read = 0; err_i err = $noerr(); pioPagesIterator_i pages; pioFile_i out; @@ -1967,27 +1900,19 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, .checksum_version = checksum_version, .backup_mode = backup_mode, .strict = true, .err = &err); if ($haserr(err)) - { - if (getErrno(err) == ENOENT) - return FILE_MISSING; - ft_logerr(FT_FATAL, $errmsg(err), "Cannot iterate pages"); - return OPEN_FAILED; - } + return $iresult(err); out = $i(pioOpen, backup_location, to_fullpath, PG_BINARY|O_RDWR|O_CREAT, file->mode, &err); if ($haserr(err)) - { - ft_logerr(FT_FATAL, $errmsg(err), "Cannot write output file"); - } + return $iresult(err); while (true) { 
PageIteratorValue value; - err_i err = $i(pioNextPage, pages, &value); - if ($haserr(err)) { - ft_logerr(FT_FATAL, $errmsg(err), "copying data file pages"); - return READ_FAILED; - } + err = $i(pioNextPage, pages, &value); + if ($haserr(err)) + return $iresult(err); + if (value.page_result == PageIsTruncated) break; @@ -1996,19 +1921,22 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, write_page(file, out, value.blknum, value.compressed_page); } - n_blocks_read++; + if (value.page_result == PageIsCorrupted) { + elog(WARNING, "Page %d of \"%s\" is corrupted", + value.blknum, file->rel_path); + } + + file->read_size += BLCKSZ; } err = $i(pioTruncate, out, file->size); if ($haserr(err)) - { - ft_logerr(FT_ERROR, $errmsg(err), "truncate in copy_pages"); - } + return $iresult(err); /* close local output file */ $i(pioClose, out, true); - return n_blocks_read; + return $noerr(); } /* diff --git a/src/datapagemap.c b/src/datapagemap.c index 7e4202a72..6c7096f37 100644 --- a/src/datapagemap.c +++ b/src/datapagemap.c @@ -16,7 +16,7 @@ struct datapagemap_iterator { - datapagemap_t *map; + datapagemap_t map; BlockNumber nextblkno; }; @@ -65,6 +65,31 @@ datapagemap_add(datapagemap_t *map, BlockNumber blkno) map->bitmap[offset] |= (1 << bitno); } +bool +datapagemap_first(datapagemap_t map, BlockNumber *start_and_result) +{ + BlockNumber blk = *start_and_result; + for (;;) + { + int nextoff = blk / 8; + int bitno = blk % 8; + + if (nextoff >= map.bitmapsize) + break; + + if (map.bitmap[nextoff] & (1 << bitno)) + { + *start_and_result = blk; + return true; + } + blk++; + } + + /* no more set bits in this bitmap. */ + *start_and_result = UINT32_MAX; + return false; +} + /* * Start iterating through all entries in the page map. 
* @@ -78,7 +103,7 @@ datapagemap_iterate(datapagemap_t *map) datapagemap_iterator_t *iter; iter = pg_malloc(sizeof(datapagemap_iterator_t)); - iter->map = map; + iter->map = *map; iter->nextblkno = 0; return iter; @@ -87,7 +112,7 @@ datapagemap_iterate(datapagemap_t *map) bool datapagemap_next(datapagemap_iterator_t *iter, BlockNumber *blkno) { - datapagemap_t *map = iter->map; + datapagemap_t *map = &iter->map; for (;;) { diff --git a/src/datapagemap.h b/src/datapagemap.h index 6ad7a6204..cff243362 100644 --- a/src/datapagemap.h +++ b/src/datapagemap.h @@ -28,6 +28,7 @@ typedef struct datapagemap datapagemap_t; typedef struct datapagemap_iterator datapagemap_iterator_t; extern void datapagemap_add(datapagemap_t *map, BlockNumber blkno); +extern bool datapagemap_first(datapagemap_t map, BlockNumber *start_and_result); extern datapagemap_iterator_t *datapagemap_iterate(datapagemap_t *map); extern bool datapagemap_next(datapagemap_iterator_t *iter, BlockNumber *blkno); diff --git a/src/utils/file.c b/src/utils/file.c index 6ae48f54d..6a337359c 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -121,14 +121,6 @@ static void dir_list_file_internal(parray *files, pgFile *parent, const char *pa bool handle_tablespaces, bool follow_symlink, bool backup_logs, bool skip_hidden, int external_dir_num, pioDrive_i drive); -static int32 prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, - BlockNumber blknum, FILE *in, - BackupMode backup_mode, - Page page, bool strict, - uint32 checksum_version, - const char *from_fullpath, - PageState *page_st); - void setMyLocation(ProbackupSubcmd const subcmd) { @@ -6447,19 +6439,19 @@ typedef struct pioRemotePagesIterator typedef struct pioLocalPagesIterator { - bool valid; BlockNumber blknum; + BlockNumber n_blocks; bool strict; - pgFile *file; - datapagemap_iterator_t *map_iter; + int segno; + datapagemap_t map; FILE *in; const char *from_fullpath; + /* prev_backup_start_lsn */ XLogRecPtr start_lsn; CompressAlg calg; int 
clevel; - BackupMode backup_mode; uint32 checksum_version; } pioLocalPagesIterator; @@ -6579,7 +6571,6 @@ pioLocalDrive_pioIteratePages(VSelf, path_t from_fullpath, pgFile *file, Self(pioLocalDrive); fobj_t iter = {0}; bool use_pagemap; - datapagemap_iterator_t *map_iter = NULL; FILE *in; fobj_reset_err(err); @@ -6618,17 +6609,29 @@ pioLocalDrive_pioIteratePages(VSelf, path_t from_fullpath, pgFile *file, } BlockNumber blknum; + datapagemap_t map = {0}; if (use_pagemap) { - map_iter = datapagemap_iterate(&file->pagemap); - blknum = 0; - } else { - blknum = -1; - } - - iter = $alloc(pioLocalPagesIterator, .valid = true, .blknum = blknum, .strict = strict, - .file = file, .from_fullpath = from_fullpath, .map_iter = map_iter, - .in = in, .start_lsn = start_lsn, .calg = calg, .clevel = clevel, .backup_mode = backup_mode, + map = file->pagemap; + } + blknum = 0; + + if (start_lsn != InvalidXLogRecPtr && + !((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && + file->exists_in_prev)) + start_lsn = InvalidXLogRecPtr; + + iter = $alloc(pioLocalPagesIterator, + .segno = file->segno, + .blknum = blknum, + .n_blocks = file->n_blocks, + .strict = strict, + .from_fullpath = from_fullpath, + .map = map, + .in = in, + .start_lsn = start_lsn, + .calg = calg, + .clevel = clevel, .checksum_version = checksum_version); return bind_pioPagesIterator(iter); @@ -6639,40 +6642,37 @@ pioLocalPagesIterator_fobjDispose(VSelf) { Self(pioLocalPagesIterator); - if (self->map_iter) { - /* Only free iterator. Map itself is not owned by us. 
*/ - pg_free(self->map_iter); - } if(self->in) fclose(self->in); } +static int32 prepare_page(pioLocalPagesIterator *iter, + BlockNumber blknum, + Page page, + PageState *page_st); + static err_i pioLocalPagesIterator_pioNextPage(VSelf, PageIteratorValue *value) { FOBJ_FUNC_ARP(); Self(pioLocalPagesIterator); - while (self->valid) + while (self->blknum < self->n_blocks) { char page_buf[BLCKSZ]; + BlockNumber blknum = self->blknum; /* next block */ - if (self->map_iter) + if (self->map.bitmapsize && + !datapagemap_first(self->map, &blknum)) { - /* exit if pagemap is exhausted */ - if (!datapagemap_next(self->map_iter, &(self->blknum))) - break; + self->blknum = self->n_blocks; + break; } - else - self->blknum++; - if (self->blknum >= self->file->n_blocks) - break; + value->blknum = blknum; + self->blknum = blknum+1; - int rc = prepare_page(self->file, self->start_lsn, self->blknum, - self->in, self->backup_mode, page_buf, self->strict, - self->checksum_version, self->from_fullpath, &value->state); - value->blknum = self->blknum; + int rc = prepare_page(self, blknum, page_buf, &value->state); value->page_result = rc; if (rc == PageIsTruncated) break; @@ -6685,7 +6685,6 @@ pioLocalPagesIterator_pioNextPage(VSelf, PageIteratorValue *value) return $noerr(); } value->page_result = PageIsTruncated; - self->valid = false; return $noerr(); } @@ -6706,18 +6705,13 @@ pioLocalPagesIterator_pioNextPage(VSelf, PageIteratorValue *value) * TODO: probably we should always * return it to the caller */ -int32 -prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, - BlockNumber blknum, FILE *in, - BackupMode backup_mode, - Page page, bool strict, - uint32 checksum_version, - const char *from_fullpath, - PageState *page_st) +static int32 +prepare_page(pioLocalPagesIterator *iter, BlockNumber blknum, Page page, PageState *page_st) { int try_again = PAGE_READ_ATTEMPTS; bool page_is_valid = false; - BlockNumber absolute_blknum = file->segno * RELSEG_SIZE + blknum; + const char 
*from_fullpath = iter->from_fullpath; + BlockNumber absolute_blknum = iter->segno * RELSEG_SIZE + blknum; int rc = 0; /* check for interrupt */ @@ -6732,7 +6726,7 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, while (!page_is_valid && try_again--) { /* read the block */ - int read_len = fio_pread(in, page, blknum * BLCKSZ); + int read_len = fio_pread(iter->in, page, blknum * BLCKSZ); /* The block could have been truncated. It is fine. */ if (read_len == 0) @@ -6753,7 +6747,7 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, /* We have BLCKSZ of raw data, validate it */ rc = validate_one_page(page, absolute_blknum, InvalidXLogRecPtr, page_st, - checksum_version); + iter->checksum_version); switch (rc) { case PAGE_IS_ZEROED: @@ -6762,7 +6756,7 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, case PAGE_IS_VALID: /* in DELTA or PTRACK modes we must compare lsn */ - if (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) + if (iter->start_lsn != InvalidXLogRecPtr) page_is_valid = true; else return PageIsOk; @@ -6782,7 +6776,7 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, } } /* avoid re-reading once buffered data, flushing on further attempts, see PBCKP-150 */ - fflush(in); + fflush(iter->in); } /* @@ -6799,12 +6793,12 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, get_header_errormsg(page, &errormsg); else if (rc == PAGE_CHECKSUM_MISMATCH) get_checksum_errormsg(page, &errormsg, - file->segno * RELSEG_SIZE + blknum); + absolute_blknum); /* Error out in case of merge or backup without ptrack support; * issue warning in case of checkdb or backup with ptrack support */ - if (!strict) + if (!iter->strict) elevel = WARNING; if (errormsg) @@ -6819,23 +6813,20 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, } /* Checkdb not going futher */ - if (!strict) + if (!iter->strict) return PageIsOk; /* * Skip page if page lsn is less than START_LSN of parent 
backup. * Nullified pages must be copied by DELTA backup, just to be safe. */ - if ((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && - file->exists_in_prev && - page_st->lsn > 0 && - page_st->lsn < prev_backup_start_lsn) + if (page_st->lsn > 0 && + page_st->lsn < iter->start_lsn) { - elog(VERBOSE, "Skipping blknum %u in file: \"%s\", file->exists_in_prev: %s, page_st->lsn: %X/%X, prev_backup_start_lsn: %X/%X", + elog(VERBOSE, "Skipping blknum %u in file: \"%s\", page_st->lsn: %X/%X, prev_backup_start_lsn: %X/%X", blknum, from_fullpath, - file->exists_in_prev ? "true" : "false", (uint32) (page_st->lsn >> 32), (uint32) page_st->lsn, - (uint32) (prev_backup_start_lsn >> 32), (uint32) prev_backup_start_lsn); + (uint32) (iter->start_lsn >> 32), (uint32) iter->start_lsn); return SkipCurrentPage; } diff --git a/src/utils/file.h b/src/utils/file.h index 243e7f187..c0c4037dd 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -232,6 +232,7 @@ fobj_error_int_key(wantedSz); fobj_error_int_key(size); fobj_error_cstr_key(kind); fobj_error_int_key(offs); +fobj_error_int_key(blknum); #ifdef HAVE_LIBZ fobj_error_kind(GZ); diff --git a/tests/backup_test.py b/tests/backup_test.py index b8c15ff66..751ae3e5c 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -1709,22 +1709,11 @@ def test_basic_missing_file_permissions(self): os.chmod(full_path, 000) - try: + with self.assertRaisesRegex(ProbackupException, + r"ERROR: [^\n]*Cannot iterate pages: Permission denied"): # FULL backup self.backup_node( backup_dir, 'node', node, options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing permissions" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: send_pages: Cannot iterate pages: Permission denied', - e.message, - '\n Unexpected Error Message: {0}\n CMD: 
{1}'.format( - repr(e.message), self.cmd)) os.chmod(full_path, 700) From 23dff595649374d4b465160a9b040cbaf53b98e7 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Dec 2022 12:53:09 +0300 Subject: [PATCH 143/339] [PBCKP-338] simpler iterate pages signature - add pioIteratePages2 with simpler signature. In fact, it is pretty close to fio_send_request used in fio_send_pages/ fio_copy_pages. to parse conditioned parameters, doIteratePages introduced - rename back pioIteragePages2 -> pioIteratePages - remove fio_send_pages, fio_copy_pages --- src/data.c | 21 +- src/pg_probackup.h | 10 +- src/utils/file.c | 906 ++++++--------------------------------------- src/utils/file.h | 35 +- 4 files changed, 152 insertions(+), 820 deletions(-) diff --git a/src/data.c b/src/data.c index 32a44be15..c289421c5 100644 --- a/src/data.c +++ b/src/data.c @@ -1323,10 +1323,13 @@ check_data_file(pgFile *file, const char *from_fullpath, uint32 checksum_version bool is_valid = true; err_i err; - pages = $i(pioIteratePages, local_location, .from_fullpath = from_fullpath, - .file = file, .start_lsn = InvalidXLogRecPtr, .calg = /* No data needed */ NONE_COMPRESS, .clevel = 0, - .checksum_version = checksum_version, .backup_mode = BACKUP_MODE_FULL, - .strict = false, .err = &err); + pages = doIteratePages(local_location, + .from_fullpath = from_fullpath, + .file = file, + .checksum_version = checksum_version, + .backup_mode = BACKUP_MODE_FULL, + .strict = false, + .err = &err); if ($haserr(err)) { if (getErrno(err) == ENOENT) { @@ -1787,10 +1790,10 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, parray *harray = NULL; err_i err = $noerr(); - pages = $i(pioIteratePages, db_location, .from_fullpath = from_fullpath, .file = file, + pages = doIteratePages(db_location, .from_fullpath = from_fullpath, .file = file, .start_lsn = prev_backup_start_lsn, .calg = calg, .clevel = clevel, .checksum_version = checksum_version, .backup_mode = backup_mode, - .strict = true, 
.err = &err); + .err = &err); if ($haserr(err)) return $iresult(err); @@ -1895,10 +1898,10 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, pioPagesIterator_i pages; pioFile_i out; - pages = $i(pioIteratePages, backup_location, .from_fullpath = from_fullpath, - .file = file, .start_lsn = sync_lsn, .calg = NONE_COMPRESS, .clevel = 0, + pages = doIteratePages(backup_location, .from_fullpath = from_fullpath, + .file = file, .start_lsn = sync_lsn, .checksum_version = checksum_version, - .backup_mode = backup_mode, .strict = true, .err = &err); + .backup_mode = backup_mode, .err = &err); if ($haserr(err)) return $iresult(err); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 5faf49b7b..4a7f06c33 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1153,14 +1153,6 @@ extern parray * pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack extern FILE* open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 buf_size); /* FIO */ -extern int fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, - XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, - bool use_pagemap, BlockNumber *err_blknum, char **errormsg, - BackupPageHeader2 **headers); -extern int fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, - XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, - bool use_pagemap, BlockNumber *err_blknum, char **errormsg); -/* return codes for fio_send_pages */ extern int fio_send_file(const char *from_fullpath, FILE* out, bool cut_zero_tail, pgFile *file, char **errormsg); extern int fio_send_file_local(const char *from_fullpath, FILE* out, bool cut_zero_tail, @@ -1171,7 +1163,7 @@ extern bool pgut_rmtree(const char *path, bool rmtopdir, bool strict); extern void pgut_setenv(const char *key, const char *val); extern void pgut_unsetenv(const char *key); -/* return codes for fio_send_pages() and fio_send_file() */ +/* 
return codes for fio_send_file() */ #define SEND_OK (0) #define FILE_MISSING (-1) #define OPEN_FAILED (-2) diff --git a/src/utils/file.c b/src/utils/file.c index 6a337359c..83387fb50 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -84,28 +84,17 @@ typedef struct uint32 checksumVersion; } fio_lsn_map_request; -typedef struct +typedef struct __attribute__((packed)) { - size_t from_fullpath_len; + int32_t segno; + int32_t pagemaplen; XLogRecPtr start_lsn; CompressAlg calg; int clevel; uint32 checksum_version; - BackupMode backup_mode; - bool strict; - - int64_t file_size; - size_t file_rel_path_len; - size_t file_linked_len; - int file_segno; - bool file_exists_in_prev; - bool file_pagemap_isabsent; - size_t file_bitmapsize; + int strict; } fio_iterate_pages_request; -static void fio_iterate_pages_impl(pioDrive_i drive, int out, const char *from_fullpath, - pgFile *file, fio_iterate_pages_request *params); - /* Convert FIO pseudo handle to index in file descriptor array */ #define fio_fileno(f) (((size_t)f - 1) | FIO_PIPE_MARKER) @@ -1852,362 +1841,6 @@ fio_load_file(int out, const char* path) } } -/* - * Return number of actually(!) readed blocks, attempts or - * half-readed block are not counted. - * Return values in case of error: - * FILE_MISSING - * OPEN_FAILED - * READ_ERROR - * PAGE_CORRUPTION - * WRITE_FAILED - * - * If none of the above, this function return number of blocks - * readed by remote agent. - * - * In case of DELTA mode horizonLsn must be a valid lsn, - * otherwise it should be set to InvalidXLogRecPtr. 
- */ -int -fio_send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, - XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, - bool use_pagemap, BlockNumber* err_blknum, char **errormsg, - BackupPageHeader2 **headers) -{ - FILE *out = NULL; - char *out_buf = NULL; - struct { - fio_header hdr; - fio_send_request arg; - } req; - BlockNumber n_blocks_read = 0; - BlockNumber blknum = 0; - - /* send message with header - - 16bytes 24bytes var var - -------------------------------------------------------------- - | fio_header | fio_send_request | FILE PATH | BITMAP(if any) | - -------------------------------------------------------------- - */ - - req.hdr.cop = FIO_SEND_PAGES; - - if (use_pagemap) - { - req.hdr.size = sizeof(fio_send_request) + (*file).pagemap.bitmapsize + strlen(from_fullpath) + 1; - req.arg.bitmapsize = (*file).pagemap.bitmapsize; - - /* TODO: add optimization for the case of pagemap - * containing small number of blocks with big serial numbers: - * https://github.com/postgrespro/pg_probackup/blob/remote_page_backup/src/utils/file.c#L1211 - */ - } - else - { - req.hdr.size = sizeof(fio_send_request) + strlen(from_fullpath) + 1; - req.arg.bitmapsize = 0; - } - - req.arg.nblocks = ft_div_i64u32_to_i32(file->size, BLCKSZ); - req.arg.segmentno = file->segno * RELSEG_SIZE; - req.arg.horizonLsn = horizonLsn; - req.arg.checksumVersion = checksum_version; - req.arg.calg = calg; - req.arg.clevel = clevel; - req.arg.path_len = strlen(from_fullpath) + 1; - - file->compress_alg = calg; /* TODO: wtf? why here? 
*/ - -//<----- -// datapagemap_iterator_t *iter; -// BlockNumber blkno; -// iter = datapagemap_iterate(pagemap); -// while (datapagemap_next(iter, &blkno)) -// elog(INFO, "block %u", blkno); -// pg_free(iter); -//<----- - - /* send header */ - IO_CHECK(fio_write_all(fio_stdout, &req, sizeof(req)), sizeof(req)); - - /* send file path */ - IO_CHECK(fio_write_all(fio_stdout, from_fullpath, req.arg.path_len), req.arg.path_len); - - /* send pagemap if any */ - if (use_pagemap) - IO_CHECK(fio_write_all(fio_stdout, (*file).pagemap.bitmap, (*file).pagemap.bitmapsize), (*file).pagemap.bitmapsize); - - while (true) - { - fio_header hdr; - char buf[BLCKSZ + sizeof(BackupPageHeader)]; - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (interrupted) - elog(ERROR, "Interrupted during page reading"); - - if (hdr.cop == FIO_ERROR) - { - /* FILE_MISSING, OPEN_FAILED and READ_FAILED */ - if (hdr.size > 0) - { - IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - *errormsg = pgut_malloc(hdr.size); - snprintf(*errormsg, hdr.size, "%s", buf); - } - - return hdr.arg; - } - else if (hdr.cop == FIO_SEND_FILE_CORRUPTION) - { - *err_blknum = hdr.arg; - - if (hdr.size > 0) - { - IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - *errormsg = pgut_malloc(hdr.size); - snprintf(*errormsg, hdr.size, "%s", buf); - } - return PAGE_CORRUPTION; - } - else if (hdr.cop == FIO_SEND_FILE_EOF) - { - /* n_blocks_read reported by EOF */ - n_blocks_read = hdr.arg; - - /* receive headers if any */ - if (hdr.size > 0) - { - *headers = pgut_malloc(hdr.size); - IO_CHECK(fio_read_all(fio_stdin, *headers, hdr.size), hdr.size); - file->n_headers = (hdr.size / sizeof(BackupPageHeader2)) -1; - } - - break; - } - else if (hdr.cop == FIO_PAGE) - { - blknum = hdr.arg; - - Assert(hdr.size <= sizeof(buf)); - IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - - COMP_CRC32C(file->crc, buf, hdr.size); - - /* lazily open backup file */ - if (!out) - out = 
open_local_file_rw(to_fullpath, &out_buf, STDIO_BUFSIZE); - - if (fio_fwrite(out, buf, hdr.size) != hdr.size) - { - fio_fclose(out); - *err_blknum = blknum; - return WRITE_FAILED; - } - file->write_size += hdr.size; - file->uncompressed_size += BLCKSZ; - } - else - elog(ERROR, "Remote agent returned message of unexpected type: %i", hdr.cop); - } - - if (out) - fclose(out); - pg_free(out_buf); - - return n_blocks_read; -} - -/* - * Return number of actually(!) readed blocks, attempts or - * half-readed block are not counted. - * Return values in case of error: - * FILE_MISSING - * OPEN_FAILED - * READ_ERROR - * PAGE_CORRUPTION - * WRITE_FAILED - * - * If none of the above, this function return number of blocks - * readed by remote agent. - * - * In case of DELTA mode horizonLsn must be a valid lsn, - * otherwise it should be set to InvalidXLogRecPtr. - * Взято из fio_send_pages - */ -int -fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, - XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, - bool use_pagemap, BlockNumber* err_blknum, char **errormsg) -{ - FILE *out = NULL; - char *out_buf = NULL; - struct { - fio_header hdr; - fio_send_request arg; - } req; - BlockNumber n_blocks_read = 0; - BlockNumber blknum = 0; - - /* send message with header - - 16bytes 24bytes var var - -------------------------------------------------------------- - | fio_header | fio_send_request | FILE PATH | BITMAP(if any) | - -------------------------------------------------------------- - */ - - req.hdr.cop = FIO_SEND_PAGES; - - if (use_pagemap) - { - req.hdr.size = sizeof(fio_send_request) + (*file).pagemap.bitmapsize + strlen(from_fullpath) + 1; - req.arg.bitmapsize = (*file).pagemap.bitmapsize; - - /* TODO: add optimization for the case of pagemap - * containing small number of blocks with big serial numbers: - * https://github.com/postgrespro/pg_probackup/blob/remote_page_backup/src/utils/file.c#L1211 - */ - } - else - { - 
req.hdr.size = sizeof(fio_send_request) + strlen(from_fullpath) + 1; - req.arg.bitmapsize = 0; - } - - req.arg.nblocks = file->size/BLCKSZ; - req.arg.segmentno = file->segno * RELSEG_SIZE; - req.arg.horizonLsn = horizonLsn; - req.arg.checksumVersion = checksum_version; - req.arg.calg = calg; - req.arg.clevel = clevel; - req.arg.path_len = strlen(from_fullpath) + 1; - - file->compress_alg = calg; /* TODO: wtf? why here? */ - -//<----- -// datapagemap_iterator_t *iter; -// BlockNumber blkno; -// iter = datapagemap_iterate(pagemap); -// while (datapagemap_next(iter, &blkno)) -// elog(INFO, "block %u", blkno); -// pg_free(iter); -//<----- - - /* send header */ - IO_CHECK(fio_write_all(fio_stdout, &req, sizeof(req)), sizeof(req)); - - /* send file path */ - IO_CHECK(fio_write_all(fio_stdout, from_fullpath, req.arg.path_len), req.arg.path_len); - - /* send pagemap if any */ - if (use_pagemap) - IO_CHECK(fio_write_all(fio_stdout, (*file).pagemap.bitmap, (*file).pagemap.bitmapsize), (*file).pagemap.bitmapsize); - - out = fio_fopen(FIO_BACKUP_HOST, to_fullpath, PG_BINARY_R "+"); - if (out == NULL) - elog(ERROR, "Cannot open restore target file \"%s\": %s", to_fullpath, strerror(errno)); - - /* update file permission */ - if (fio_chmod(FIO_BACKUP_HOST, to_fullpath, file->mode) == -1) - elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath, - strerror(errno)); - - elog(VERBOSE, "ftruncate file \"%s\" to size %zu", - to_fullpath, file->size); - if (fio_ftruncate(out, file->size) == -1) - elog(ERROR, "Cannot ftruncate file \"%s\" to size %zu: %s", - to_fullpath, file->size, strerror(errno)); - - if (!fio_is_remote_file(out)) - { - out_buf = pgut_malloc(STDIO_BUFSIZE); - setvbuf(out, out_buf, _IOFBF, STDIO_BUFSIZE); - } - - while (true) - { - fio_header hdr; - char buf[BLCKSZ + sizeof(BackupPageHeader)]; - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (interrupted) - elog(ERROR, "Interrupted during page reading"); - - if (hdr.cop == FIO_ERROR) - 
{ - /* FILE_MISSING, OPEN_FAILED and READ_FAILED */ - if (hdr.size > 0) - { - IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - *errormsg = pgut_malloc(hdr.size); - snprintf(*errormsg, hdr.size, "%s", buf); - } - - return hdr.arg; - } - else if (hdr.cop == FIO_SEND_FILE_CORRUPTION) - { - *err_blknum = hdr.arg; - - if (hdr.size > 0) - { - IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - *errormsg = pgut_malloc(hdr.size); - snprintf(*errormsg, hdr.size, "%s", buf); - } - return PAGE_CORRUPTION; - } - else if (hdr.cop == FIO_SEND_FILE_EOF) - { - /* n_blocks_read reported by EOF */ - n_blocks_read = hdr.arg; - - /* receive headers if any */ - if (hdr.size > 0) - { - char *tmp = pgut_malloc(hdr.size); - IO_CHECK(fio_read_all(fio_stdin, tmp, hdr.size), hdr.size); - pg_free(tmp); - } - - break; - } - else if (hdr.cop == FIO_PAGE) - { - blknum = hdr.arg; - - Assert(hdr.size <= sizeof(buf)); - IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - - if (fio_fseek(out, blknum * BLCKSZ) < 0) - { - elog(ERROR, "Cannot seek block %u of \"%s\": %s", - blknum, to_fullpath, strerror(errno)); - } - // должен прилетать некомпрессированный блок с заголовком - // Вставить assert? 
- if (fio_fwrite(out, buf + sizeof(BackupPageHeader), hdr.size - sizeof(BackupPageHeader)) != BLCKSZ) - { - fio_fclose(out); - *err_blknum = blknum; - return WRITE_FAILED; - } - file->write_size += BLCKSZ; - file->uncompressed_size += BLCKSZ; - } - else - elog(ERROR, "Remote agent returned message of unexpected type: %i", hdr.cop); - } - - if (out) - fclose(out); - pg_free(out_buf); - - return n_blocks_read; -} - static void fio_send_pio_err(int out, err_i err) { @@ -2232,17 +1865,25 @@ fio_receive_pio_err(fio_header *hdr) } static void -fio_iterate_pages_impl(pioDrive_i drive, int out, const char *from_fullpath, - pgFile *file, fio_iterate_pages_request *params) +fio_iterate_pages_impl(pioDrive_i drive, int out, const char *path, + datapagemap_t pagemap, + fio_iterate_pages_request *params) { pioPagesIterator_i pages; err_i err = $noerr(); fio_header hdr = {.cop=FIO_ITERATE_DATA}; - pages = $i(pioIteratePages, drive, .from_fullpath = from_fullpath, .file = file, - .start_lsn = params->start_lsn, .calg = params->calg, .clevel = params->clevel, - .checksum_version = params->checksum_version, .backup_mode = params->backup_mode, - .strict = params->strict, .err = &err); + pages = $i(pioIteratePages, drive, + .path = path, + .segno = params->segno, + .pagemap = pagemap, + .start_lsn = params->start_lsn, + .calg = params->calg, + .clevel = params->clevel, + .checksum_version = params->checksum_version, + .strict = params->strict, + .err = &err); + if ($haserr(err)) { fio_send_pio_err(out, err); @@ -2278,277 +1919,6 @@ fio_iterate_pages_impl(pioDrive_i drive, int out, const char *from_fullpath, IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } -/* TODO: read file using large buffer - * Return codes: - * FIO_ERROR: - * FILE_MISSING (-1) - * OPEN_FAILED (-2) - * READ_FAILED (-3) - - * FIO_SEND_FILE_CORRUPTION - * FIO_SEND_FILE_EOF - */ -static void -fio_send_pages_impl(int out, char* buf) -{ - FILE *in = NULL; - BlockNumber blknum = 0; - int current_pos = 0; 
- BlockNumber n_blocks_read = 0; - PageState page_st; - char read_buffer[BLCKSZ+1]; - char in_buf[STDIO_BUFSIZE]; - fio_header hdr; - fio_send_request *req = (fio_send_request*) buf; - char *from_fullpath = (char*) buf + sizeof(fio_send_request); - bool with_pagemap = req->bitmapsize > 0 ? true : false; - /* error reporting */ - char *errormsg = NULL; - /* parse buffer */ - datapagemap_t *map = NULL; - datapagemap_iterator_t *iter = NULL; - /* page headers */ - int32 hdr_num = -1; - int32 cur_pos_out = 0; - BackupPageHeader2 *headers = NULL; - - /* open source file */ - in = fopen(from_fullpath, PG_BINARY_R); - if (!in) - { - hdr.cop = FIO_ERROR; - - /* do not send exact wording of ENOENT error message - * because it is a very common error in our case, so - * error code is enough. - */ - if (errno == ENOENT) - { - hdr.arg = FILE_MISSING; - hdr.size = 0; - } - else - { - hdr.arg = OPEN_FAILED; - errormsg = pgut_malloc(ERRMSG_MAX_LEN); - /* Construct the error message */ - snprintf(errormsg, ERRMSG_MAX_LEN, "Cannot open file \"%s\": %s", - from_fullpath, strerror(errno)); - hdr.size = strlen(errormsg) + 1; - } - - /* send header and message */ - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - if (errormsg) - IO_CHECK(fio_write_all(out, errormsg, hdr.size), hdr.size); - - goto cleanup; - } - - if (with_pagemap) - { - map = pgut_malloc(sizeof(datapagemap_t)); - map->bitmapsize = req->bitmapsize; - map->bitmap = (char*) buf + sizeof(fio_send_request) + req->path_len; - - /* get first block */ - iter = datapagemap_iterate(map); - datapagemap_next(iter, &blknum); - - setvbuf(in, NULL, _IONBF, BUFSIZ); - } - else - setvbuf(in, in_buf, _IOFBF, STDIO_BUFSIZE); - - /* TODO: what is this barrier for? 
*/ - read_buffer[BLCKSZ] = 1; /* barrier */ - - while (blknum < req->nblocks) - { - int rc = 0; - size_t read_len = 0; - int retry_attempts = PAGE_READ_ATTEMPTS; - - /* TODO: handle signals on the agent */ - if (interrupted) - elog(ERROR, "Interrupted during remote page reading"); - - /* read page, check header and validate checksumms */ - for (;;) - { - /* - * Optimize stdio buffer usage, fseek only when current position - * does not match the position of requested block. - */ - if (current_pos != blknum*BLCKSZ) - { - current_pos = blknum*BLCKSZ; - if (fseek(in, current_pos, SEEK_SET) != 0) - elog(ERROR, "fseek to position %u is failed on remote file '%s': %s", - current_pos, from_fullpath, strerror(errno)); - } - - read_len = fread(read_buffer, 1, BLCKSZ, in); - - current_pos += read_len; - - /* report error */ - if (ferror(in)) - { - hdr.cop = FIO_ERROR; - hdr.arg = READ_FAILED; - - errormsg = pgut_malloc(ERRMSG_MAX_LEN); - /* Construct the error message */ - snprintf(errormsg, ERRMSG_MAX_LEN, "Cannot read block %u of '%s': %s", - blknum, from_fullpath, strerror(errno)); - hdr.size = strlen(errormsg) + 1; - - /* send header and message */ - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(out, errormsg, hdr.size), hdr.size); - goto cleanup; - } - - if (read_len == BLCKSZ) - { - rc = validate_one_page(read_buffer, req->segmentno + blknum, - InvalidXLogRecPtr, &page_st, - req->checksumVersion); - - /* TODO: optimize copy of zeroed page */ - if (rc == PAGE_IS_ZEROED) - break; - else if (rc == PAGE_IS_VALID) - break; - } - - if (feof(in)) - goto eof; -// else /* readed less than BLKSZ bytes, retry */ - - /* File is either has insane header or invalid checksum, - * retry. If retry attempts are exhausted, report corruption. 
- */ - if (--retry_attempts == 0) - { - hdr.cop = FIO_SEND_FILE_CORRUPTION; - hdr.arg = blknum; - - /* Construct the error message */ - if (rc == PAGE_HEADER_IS_INVALID) - get_header_errormsg(read_buffer, &errormsg); - else if (rc == PAGE_CHECKSUM_MISMATCH) - get_checksum_errormsg(read_buffer, &errormsg, - req->segmentno + blknum); - - /* if error message is not empty, set payload size to its length */ - hdr.size = errormsg ? strlen(errormsg) + 1 : 0; - - /* send header */ - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - - /* send error message if any */ - if (errormsg) - IO_CHECK(fio_write_all(out, errormsg, hdr.size), hdr.size); - - goto cleanup; - } - } - - n_blocks_read++; - - /* - * horizonLsn is not 0 only in case of delta and ptrack backup. - * As far as unsigned number are always greater or equal than zero, - * there is no sense to add more checks. - */ - if ((req->horizonLsn == InvalidXLogRecPtr) || /* full, page */ - (page_st.lsn == InvalidXLogRecPtr) || /* zeroed page */ - (req->horizonLsn > 0 && page_st.lsn > req->horizonLsn)) /* delta, ptrack */ - { - int compressed_size = 0; - char write_buffer[BLCKSZ*2]; - BackupPageHeader* bph = (BackupPageHeader*)write_buffer; - - /* compress page */ - hdr.cop = FIO_PAGE; - hdr.arg = blknum; - - compressed_size = do_compress(write_buffer + sizeof(BackupPageHeader), - sizeof(write_buffer) - sizeof(BackupPageHeader), - read_buffer, BLCKSZ, req->calg, req->clevel, - NULL); - - if (compressed_size <= 0 || compressed_size >= BLCKSZ) - { - /* Do not compress page */ - memcpy(write_buffer + sizeof(BackupPageHeader), read_buffer, BLCKSZ); - compressed_size = BLCKSZ; - } - bph->block = blknum; - bph->compressed_size = compressed_size; - - hdr.size = compressed_size + sizeof(BackupPageHeader); - - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(out, write_buffer, hdr.size), hdr.size); - - /* set page header for this file */ - hdr_num++; - if (!headers) - headers = 
(BackupPageHeader2 *) pgut_malloc(sizeof(BackupPageHeader2)); - else - headers = (BackupPageHeader2 *) pgut_realloc(headers, (hdr_num+1) * sizeof(BackupPageHeader2)); - - headers[hdr_num].block = blknum; - headers[hdr_num].lsn = page_st.lsn; - headers[hdr_num].checksum = page_st.checksum; - headers[hdr_num].pos = cur_pos_out; - - cur_pos_out += hdr.size; - } - - /* next block */ - if (with_pagemap) - { - /* exit if pagemap is exhausted */ - if (!datapagemap_next(iter, &blknum)) - break; - } - else - blknum++; - } - -eof: - /* We are done, send eof */ - hdr.cop = FIO_SEND_FILE_EOF; - hdr.arg = n_blocks_read; - hdr.size = 0; - - if (headers) - { - hdr.size = (hdr_num+2) * sizeof(BackupPageHeader2); - - /* add dummy header */ - headers = (BackupPageHeader2 *) pgut_realloc(headers, (hdr_num+2) * sizeof(BackupPageHeader2)); - headers[hdr_num+1].pos = cur_pos_out; - } - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (headers) - IO_CHECK(fio_write_all(out, headers, hdr.size), hdr.size); - -cleanup: - pg_free(map); - pg_free(iter); - pg_free(errormsg); - pg_free(headers); - if (in) - fclose(in); - return; -} - typedef struct send_file_state { bool calc_crc; uint32_t crc; @@ -4051,10 +3421,6 @@ fio_communicate(int in, int out) case FIO_REMOVE_DIR: fio_remove_dir_impl(out, buf); break; - case FIO_SEND_PAGES: - /* buf contain fio_send_request header and bitmap. 
*/ - fio_send_pages_impl(out, buf); - break; case FIO_SEND_FILE: fio_send_file_impl(out, buf); break; @@ -4136,58 +3502,20 @@ fio_communicate(int in, int out) case FIO_ITERATE_PAGES: { ft_bytes_t bytes = {.ptr = buf, .len = hdr.size}; - fio_iterate_pages_request *params; - char *from_fullpath = NULL; - char *rel_path = NULL; - char *linked = NULL; - pgFile *file = NULL; + char *from_fullpath; + datapagemap_t pagemap; - params = (fio_iterate_pages_request *)bytes.ptr; - ft_bytes_consume(&bytes, sizeof(fio_iterate_pages_request)); + params = (fio_iterate_pages_request*)bytes.ptr; + ft_bytes_consume(&bytes, sizeof(*params)); - if (params->from_fullpath_len) - { - from_fullpath = bytes.ptr; - ft_bytes_consume(&bytes, params->from_fullpath_len); - } + pagemap.bitmapsize = params->pagemaplen; + pagemap.bitmap = bytes.ptr; + ft_bytes_consume(&bytes, pagemap.bitmapsize); - if (params->file_rel_path_len) - { - // free-d in pgFileFree - rel_path = pgut_malloc(params->file_rel_path_len); - memcpy(rel_path, bytes.ptr, params->file_rel_path_len); - ft_bytes_consume(&bytes, params->file_rel_path_len); - } + from_fullpath = bytes.ptr; - if (params->file_linked_len) - { - // free-d in pgFileFree - linked = pgut_malloc(params->file_linked_len); - memcpy(linked, bytes.ptr, params->file_linked_len); - ft_bytes_consume(&bytes, params->file_linked_len); - } - - file = pgFileInit(rel_path); - - file->size = params->file_size; - file->segno = params->file_segno; - file->exists_in_prev = params->file_exists_in_prev; - file->pagemap_isabsent = params->file_pagemap_isabsent; - file->rel_path = rel_path; - file->linked = linked; - - file->pagemap.bitmapsize = params->file_bitmapsize; - if (params->file_bitmapsize) - { - file->pagemap.bitmap = pgut_malloc(params->file_bitmapsize); - memcpy(file->pagemap.bitmap, bytes.ptr, params->file_bitmapsize); - ft_bytes_consume(&bytes, params->file_bitmapsize); - } - - fio_iterate_pages_impl(drive, out, from_fullpath, file, params); - - pgFileFree(file); 
+ fio_iterate_pages_impl(drive, out, from_fullpath, pagemap, params); } break; default: @@ -6463,67 +5791,41 @@ fobj_klass(pioLocalPagesIterator); fobj_klass(pioRemotePagesIterator); static pioPagesIterator_i -pioRemoteDrive_pioIteratePages(VSelf, path_t from_fullpath, pgFile *file, - XLogRecPtr start_lsn, CompressAlg calg, int clevel, - uint32 checksum_version, BackupMode backup_mode, - bool strict, err_i *err) +pioRemoteDrive_pioIteratePages(VSelf, path_t from_fullpath, + int segno, datapagemap_t pagemap, + XLogRecPtr start_lsn, + CompressAlg calg, int clevel, + uint32 checksum_version, bool strict, err_i *err) { Self(pioRemoteDrive); fobj_t iter = {0}; fio_header hdr = {.cop = FIO_ITERATE_PAGES}; - fio_iterate_pages_request params; - memset(¶ms, 0, sizeof(params)); - params = (fio_iterate_pages_request){ - .from_fullpath_len = strlen(from_fullpath)+1, - .start_lsn = start_lsn, - .calg = calg, - .clevel = clevel, - .checksum_version=checksum_version, - .backup_mode = backup_mode, - .strict=strict, - .file_size = file->size, - .file_rel_path_len = file->rel_path?strlen(file->rel_path)+1:0, - .file_linked_len = file->linked?strlen(file->linked)+1:0, - .file_segno = file->segno, - .file_exists_in_prev = file->exists_in_prev, - .file_pagemap_isabsent = file->pagemap_isabsent, - .file_bitmapsize = file->pagemap.bitmapsize + ft_strbuf_t buf = ft_strbuf_zero(); + fio_iterate_pages_request req = { + .segno = segno, + .pagemaplen = pagemap.bitmapsize, + .start_lsn = start_lsn, + .calg = calg, + .clevel = clevel, + .checksum_version = checksum_version, + .strict = strict, }; - fobj_reset_err(err); - - if (file->size % BLCKSZ != 0) - elog(WARNING, "File: \"%s\", invalid file size %zu", from_fullpath, file->size); - - size_t total_size = sizeof(hdr) + sizeof(params) + params.from_fullpath_len - + params.file_rel_path_len + params.file_linked_len + params.file_bitmapsize; - - ft_strbuf_t req = ft_strbuf_zero(); - - hdr.size = total_size - sizeof(hdr); - - 
ft_strbuf_catbytes(&req, ft_bytes(&hdr, sizeof(hdr))); + ft_strbuf_catbytes(&buf, ft_bytes(&hdr, sizeof(hdr))); + ft_strbuf_catbytes(&buf, ft_bytes(&req, sizeof(req))); + ft_strbuf_catbytes(&buf, ft_bytes(pagemap.bitmap, pagemap.bitmapsize)); + ft_strbuf_catc(&buf, from_fullpath); + ft_strbuf_cat1(&buf, '\0'); - ft_strbuf_catbytes(&req, ft_bytes(¶ms, sizeof(fio_iterate_pages_request))); - if(params.from_fullpath_len) - ft_strbuf_catbytes(&req, ft_bytes((char *)from_fullpath, params.from_fullpath_len)); - if(params.file_rel_path_len) - ft_strbuf_catbytes(&req, ft_bytes(file->rel_path, params.file_rel_path_len)); - if(params.file_linked_len) - ft_strbuf_catbytes(&req, ft_bytes(file->linked, params.file_linked_len)); - if(params.file_bitmapsize) - ft_strbuf_catbytes(&req, ft_bytes(file->pagemap.bitmap, params.file_bitmapsize)); + ((fio_header*)buf.ptr)->size = buf.len - sizeof(fio_header); - Assert(req.len == total_size); - Assert(!req.overflowed); - - IO_CHECK(fio_write_all(fio_stdout, req.ptr, req.len), req.len); + IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len), buf.len); - ft_strbuf_free(&req); + ft_strbuf_free(&buf); - iter = $alloc(pioRemotePagesIterator, .valid = true); + iter = $alloc(pioRemotePagesIterator, .valid = true); - return bind_pioPagesIterator(iter); + return bind_pioPagesIterator(iter); } static err_i @@ -6562,45 +5864,60 @@ pioRemotePagesIterator_pioNextPage(VSelf, PageIteratorValue *value) intCode(hdr.cop)); } -static pioPagesIterator_i -pioLocalDrive_pioIteratePages(VSelf, path_t from_fullpath, pgFile *file, - XLogRecPtr start_lsn, CompressAlg calg, int clevel, - uint32 checksum_version, BackupMode backup_mode, - bool strict, err_i *err) +pioPagesIterator_i +doIteratePages_impl(pioDrive_i drive, struct doIteratePages_params p) { - Self(pioLocalDrive); - fobj_t iter = {0}; - bool use_pagemap; - FILE *in; - - fobj_reset_err(err); - - if (file->size % BLCKSZ != 0) - elog(WARNING, "File: \"%s\", invalid file size %zu", from_fullpath, 
file->size); - - /* - * Compute expected number of blocks in the file. - * NOTE This is a normal situation, if the file size has changed - * since the moment we computed it. - */ - file->n_blocks = ft_div_i64u32_to_i32(file->size, BLCKSZ); + datapagemap_t pagemap = {0}; + fobj_reset_err(p.err); /* * If page map is empty or file is not present in destination directory, * then copy backup all pages of the relation. */ + if (p.file->pagemap.bitmapsize != PageBitmapIsEmpty && + !p.file->pagemap_isabsent && p.file->exists_in_prev && + p.file->pagemap.bitmap) + pagemap = p.file->pagemap; - if (file->pagemap.bitmapsize == PageBitmapIsEmpty || - file->pagemap_isabsent || !file->exists_in_prev || - !file->pagemap.bitmap) - use_pagemap = false; - else - use_pagemap = true; + /* Skip page if page lsn is less than START_LSN of parent backup. */ + if (p.start_lsn != InvalidXLogRecPtr) + { + if (!p.file->exists_in_prev) + p.start_lsn = InvalidXLogRecPtr; + if (p.backup_mode != BACKUP_MODE_DIFF_DELTA && + p.backup_mode != BACKUP_MODE_DIFF_PTRACK) + p.start_lsn = InvalidXLogRecPtr; + } - if (use_pagemap) - elog(LOG, "Using pagemap for file \"%s\"", file->rel_path); + return $i(pioIteratePages, drive, + .path = p.from_fullpath, + .segno = p.file->segno, + .pagemap = pagemap, + .start_lsn = p.start_lsn, + .calg = p.calg, + .clevel = p.clevel, + .checksum_version = p.checksum_version, + .strict = p.strict, + .err = p.err); +} - in = fopen(from_fullpath, PG_BINARY_R); +static pioPagesIterator_i +pioLocalDrive_pioIteratePages(VSelf, path_t path, + int segno, datapagemap_t pagemap, + XLogRecPtr start_lsn, + CompressAlg calg, int clevel, + uint32 checksum_version, bool strict, err_i *err) +{ + Self(pioLocalDrive); + fobj_t iter = {0}; + BlockNumber n_blocks; + FILE *in; + int fd; + struct stat st; + + fobj_reset_err(err); + + in = fopen(path, PG_BINARY_R); if (!in) { pioPagesIterator_i ret = {0}; @@ -6608,33 +5925,32 @@ pioLocalDrive_pioIteratePages(VSelf, path_t from_fullpath, pgFile 
*file, return ret; } - BlockNumber blknum; - datapagemap_t map = {0}; - if (use_pagemap) + fd = fileno(in); + if (fstat(fd, &st) == -1) { - map = file->pagemap; + *err = $syserr(errno, "Cannot stat datafile"); } - blknum = 0; - if (start_lsn != InvalidXLogRecPtr && - !((backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) && - file->exists_in_prev)) - start_lsn = InvalidXLogRecPtr; + /* + * Compute expected number of blocks in the file. + * NOTE This is a normal situation, if the file size has changed + * since the moment we computed it. + */ + n_blocks = ft_div_i64u32_to_i32(st.st_size, BLCKSZ); - iter = $alloc(pioLocalPagesIterator, - .segno = file->segno, - .blknum = blknum, - .n_blocks = file->n_blocks, + iter = $alloc(pioLocalPagesIterator, + .segno = segno, + .n_blocks = n_blocks, .strict = strict, - .from_fullpath = from_fullpath, - .map = map, + .from_fullpath = path, + .map = pagemap, .in = in, .start_lsn = start_lsn, .calg = calg, .clevel = clevel, .checksum_version = checksum_version); - return bind_pioPagesIterator(iter); + return bind_pioPagesIterator(iter); } static void diff --git a/src/utils/file.h b/src/utils/file.h index c0c4037dd..3141e07be 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -14,6 +14,8 @@ #include +#include "datapagemap.h" + /* Directory/File permission */ #define DIR_PERMISSION (0700) #define FILE_PERMISSION (0600) @@ -48,8 +50,6 @@ typedef enum /* used for incremental restore */ FIO_GET_CHECKSUM_MAP, FIO_GET_LSN_MAP, - /* used in fio_send_pages */ - FIO_SEND_PAGES, FIO_ERROR, FIO_SEND_FILE, // FIO_CHUNK, @@ -311,11 +311,10 @@ fobj_iface(pioPagesIterator); #define mth__pioWriteFile err_i, (path_t, path), (ft_bytes_t, content), (bool, binary) #define mth__pioWriteFile__optional() (binary, true) -#define mth__pioIteratePages pioPagesIterator_i, (path_t, from_fullpath), \ - (pgFile *, file), (XLogRecPtr, start_lsn), (CompressAlg, calg), (int, clevel), \ - (uint32, checksum_version), (BackupMode, 
backup_mode), (bool, strict), \ - (err_i *, err) -fobj_method(pioIteratePages); +#define mth__pioIteratePages pioPagesIterator_i, (path_t, path), \ + (int, segno), (datapagemap_t, pagemap), (XLogRecPtr, start_lsn), \ + (CompressAlg, calg), (int, clevel), \ + (uint32, checksum_version), (bool, strict), (err_i*, err) fobj_method(pioOpen); fobj_method(pioStat); @@ -330,6 +329,7 @@ fobj_method(pioListDir); fobj_method(pioRemoveDir); fobj_method(pioReadFile); fobj_method(pioWriteFile); +fobj_method(pioIteratePages); #define iface__pioDrive mth(pioOpen, pioStat, pioRemove, pioRename), \ mth(pioExists, pioGetCRC32, pioIsRemote), \ @@ -340,6 +340,27 @@ fobj_iface(pioDrive); extern pioDrive_i pioDriveForLocation(fio_location location); +struct doIteratePages_params { + path_t from_fullpath; + pgFile *file; + XLogRecPtr start_lsn; + CompressAlg calg; + int clevel; + uint32 checksum_version; + BackupMode backup_mode; + bool strict; + err_i *err; +}; + +extern pioPagesIterator_i +doIteratePages_impl(pioDrive_i drive, struct doIteratePages_params p); +#define doIteratePages(drive, ...) \ + doIteratePages_impl(drive, ((struct doIteratePages_params){ \ + .start_lsn = InvalidXLogRecPtr, \ + .calg = NONE_COMPRESS, .clevel = 0, \ + .strict = true, \ + __VA_ARGS__})) + #define mth__pioSetAsync err_i, (bool, async) #define mth__pioSetAsync__optional() (async, true) #define mth__pioAsyncRead size_t, (ft_bytes_t, buf), (err_i*, err) From 218e78468cf759991d20c54f59304d375177a405 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Dec 2022 20:59:36 +0300 Subject: [PATCH 144/339] [PBCKP-338] move pioIteratePages to separate pioDBDrive. Looks like it is beginning of separation of db-aware drives. 
--- src/utils/file.c | 8 ++++---- src/utils/file.h | 10 ++++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 83387fb50..be5646ff6 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1873,7 +1873,7 @@ fio_iterate_pages_impl(pioDrive_i drive, int out, const char *path, err_i err = $noerr(); fio_header hdr = {.cop=FIO_ITERATE_DATA}; - pages = $i(pioIteratePages, drive, + pages = $(pioIteratePages, drive.self, .path = path, .segno = params->segno, .pagemap = pagemap, @@ -3534,13 +3534,13 @@ fio_communicate(int in, int out) typedef struct pioLocalDrive { } pioLocalDrive; -#define kls__pioLocalDrive iface__pioDrive, iface(pioDrive) +#define kls__pioLocalDrive iface__pioDBDrive, iface(pioDBDrive) fobj_klass(pioLocalDrive); typedef struct pioRemoteDrive { } pioRemoteDrive; -#define kls__pioRemoteDrive iface__pioDrive, iface(pioDrive) +#define kls__pioRemoteDrive iface__pioDBDrive, iface(pioDBDrive) fobj_klass(pioRemoteDrive); typedef struct pioFile @@ -5865,7 +5865,7 @@ pioRemotePagesIterator_pioNextPage(VSelf, PageIteratorValue *value) } pioPagesIterator_i -doIteratePages_impl(pioDrive_i drive, struct doIteratePages_params p) +doIteratePages_impl(pioIteratePages_i drive, struct doIteratePages_params p) { datapagemap_t pagemap = {0}; fobj_reset_err(p.err); diff --git a/src/utils/file.h b/src/utils/file.h index 3141e07be..f89eab26f 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -334,10 +334,12 @@ fobj_method(pioIteratePages); #define iface__pioDrive mth(pioOpen, pioStat, pioRemove, pioRename), \ mth(pioExists, pioGetCRC32, pioIsRemote), \ mth(pioMakeDir, pioListDir, pioRemoveDir), \ - mth(pioFilesAreSame, pioReadFile, pioWriteFile), \ - mth(pioIteratePages) + mth(pioFilesAreSame, pioReadFile, pioWriteFile) fobj_iface(pioDrive); +#define iface__pioDBDrive iface__pioDrive, mth(pioIteratePages) +fobj_iface(pioDBDrive); + extern pioDrive_i pioDriveForLocation(fio_location location); struct 
doIteratePages_params { @@ -353,9 +355,9 @@ struct doIteratePages_params { }; extern pioPagesIterator_i -doIteratePages_impl(pioDrive_i drive, struct doIteratePages_params p); +doIteratePages_impl(pioIteratePages_i drive, struct doIteratePages_params p); #define doIteratePages(drive, ...) \ - doIteratePages_impl(drive, ((struct doIteratePages_params){ \ + doIteratePages_impl($bind(pioIteratePages, drive.self), ((struct doIteratePages_params){ \ .start_lsn = InvalidXLogRecPtr, \ .calg = NONE_COMPRESS, .clevel = 0, \ .strict = true, \ From 8be1417ee9258bfba0501ecc5a6ddcf717edca4c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Dec 2022 21:48:44 +0300 Subject: [PATCH 145/339] [PBCKP-338] pioIteratePages: rename 'strict' parameter to 'just_validate' And don't send actual content for validation. --- src/data.c | 2 +- src/utils/file.c | 23 ++++++++++++----------- src/utils/file.h | 7 +++---- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/data.c b/src/data.c index c289421c5..3a9b3c56e 100644 --- a/src/data.c +++ b/src/data.c @@ -1328,7 +1328,7 @@ check_data_file(pgFile *file, const char *from_fullpath, uint32 checksum_version .file = file, .checksum_version = checksum_version, .backup_mode = BACKUP_MODE_FULL, - .strict = false, + .just_validate = true, .err = &err); if ($haserr(err)) { diff --git a/src/utils/file.c b/src/utils/file.c index be5646ff6..b8ae6d618 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -92,7 +92,7 @@ typedef struct __attribute__((packed)) CompressAlg calg; int clevel; uint32 checksum_version; - int strict; + int just_validate; } fio_iterate_pages_request; /* Convert FIO pseudo handle to index in file descriptor array */ @@ -1881,7 +1881,7 @@ fio_iterate_pages_impl(pioDrive_i drive, int out, const char *path, .calg = params->calg, .clevel = params->clevel, .checksum_version = params->checksum_version, - .strict = params->strict, + .just_validate = params->just_validate, .err = &err); if ($haserr(err)) @@ -5770,7 
+5770,7 @@ typedef struct pioLocalPagesIterator BlockNumber blknum; BlockNumber n_blocks; - bool strict; + bool just_validate; int segno; datapagemap_t map; FILE *in; @@ -5795,7 +5795,7 @@ pioRemoteDrive_pioIteratePages(VSelf, path_t from_fullpath, int segno, datapagemap_t pagemap, XLogRecPtr start_lsn, CompressAlg calg, int clevel, - uint32 checksum_version, bool strict, err_i *err) + uint32 checksum_version, bool just_validate, err_i *err) { Self(pioRemoteDrive); fobj_t iter = {0}; @@ -5808,7 +5808,7 @@ pioRemoteDrive_pioIteratePages(VSelf, path_t from_fullpath, .calg = calg, .clevel = clevel, .checksum_version = checksum_version, - .strict = strict, + .just_validate = just_validate, }; ft_strbuf_catbytes(&buf, ft_bytes(&hdr, sizeof(hdr))); @@ -5897,7 +5897,7 @@ doIteratePages_impl(pioIteratePages_i drive, struct doIteratePages_params p) .calg = p.calg, .clevel = p.clevel, .checksum_version = p.checksum_version, - .strict = p.strict, + .just_validate = p.just_validate, .err = p.err); } @@ -5906,7 +5906,7 @@ pioLocalDrive_pioIteratePages(VSelf, path_t path, int segno, datapagemap_t pagemap, XLogRecPtr start_lsn, CompressAlg calg, int clevel, - uint32 checksum_version, bool strict, err_i *err) + uint32 checksum_version, bool just_validate, err_i *err) { Self(pioLocalDrive); fobj_t iter = {0}; @@ -5941,7 +5941,7 @@ pioLocalDrive_pioIteratePages(VSelf, path_t path, iter = $alloc(pioLocalPagesIterator, .segno = segno, .n_blocks = n_blocks, - .strict = strict, + .just_validate = just_validate, .from_fullpath = path, .map = pagemap, .in = in, @@ -5972,6 +5972,7 @@ pioLocalPagesIterator_pioNextPage(VSelf, PageIteratorValue *value) FOBJ_FUNC_ARP(); Self(pioLocalPagesIterator); + value->compressed_size = 0; while (self->blknum < self->n_blocks) { char page_buf[BLCKSZ]; @@ -5992,7 +5993,7 @@ pioLocalPagesIterator_pioNextPage(VSelf, PageIteratorValue *value) value->page_result = rc; if (rc == PageIsTruncated) break; - if (rc == PageIsOk) + if (rc == PageIsOk && 
!self->just_validate) { value->compressed_size = compress_page(value->compressed_page, BLCKSZ, value->blknum, page_buf, self->calg, @@ -6114,7 +6115,7 @@ prepare_page(pioLocalPagesIterator *iter, BlockNumber blknum, Page page, PageSta /* Error out in case of merge or backup without ptrack support; * issue warning in case of checkdb or backup with ptrack support */ - if (!iter->strict) + if (iter->just_validate) elevel = WARNING; if (errormsg) @@ -6129,7 +6130,7 @@ prepare_page(pioLocalPagesIterator *iter, BlockNumber blknum, Page page, PageSta } /* Checkdb not going futher */ - if (!iter->strict) + if (iter->just_validate) return PageIsOk; /* diff --git a/src/utils/file.h b/src/utils/file.h index f89eab26f..47fa4c0d3 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -271,7 +271,6 @@ typedef struct PageState state; BlockNumber blknum; int page_result; - int compression; size_t compressed_size; char compressed_page[BLCKSZ]; /* MUST be last */ } PageIteratorValue; @@ -314,7 +313,7 @@ fobj_iface(pioPagesIterator); #define mth__pioIteratePages pioPagesIterator_i, (path_t, path), \ (int, segno), (datapagemap_t, pagemap), (XLogRecPtr, start_lsn), \ (CompressAlg, calg), (int, clevel), \ - (uint32, checksum_version), (bool, strict), (err_i*, err) + (uint32, checksum_version), (bool, just_validate), (err_i*, err) fobj_method(pioOpen); fobj_method(pioStat); @@ -350,7 +349,7 @@ struct doIteratePages_params { int clevel; uint32 checksum_version; BackupMode backup_mode; - bool strict; + bool just_validate; err_i *err; }; @@ -360,7 +359,7 @@ doIteratePages_impl(pioIteratePages_i drive, struct doIteratePages_params p); doIteratePages_impl($bind(pioIteratePages, drive.self), ((struct doIteratePages_params){ \ .start_lsn = InvalidXLogRecPtr, \ .calg = NONE_COMPRESS, .clevel = 0, \ - .strict = true, \ + .just_validate = false, \ __VA_ARGS__})) #define mth__pioSetAsync err_i, (bool, async) From 3b86b76dbeb65202938bb37c6cc8e4898313c395 Mon Sep 17 00:00:00 2001 From: Yura Sokolov 
Date: Thu, 8 Dec 2022 22:49:52 +0300 Subject: [PATCH 146/339] [PBCKP-338] use n_blocks returned by pages iterator. pages iterator could actually be more accurate. In future, we could read till the end of file instead of setting n_blocks at iteration start. --- src/data.c | 10 ++--- src/utils/file.c | 105 +++++++++++++++++++++++++++++++++++------------ src/utils/file.h | 4 +- 3 files changed, 84 insertions(+), 35 deletions(-) diff --git a/src/data.c b/src/data.c index 3a9b3c56e..06e0433ae 100644 --- a/src/data.c +++ b/src/data.c @@ -434,13 +434,6 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat ft_logerr(FT_FATAL, $errmsg(err), "Copying data file \"%s\"", file->rel_path); } - /* refresh n_blocks for FULL and DELTA */ - if (backup_mode == BACKUP_MODE_FULL || - backup_mode == BACKUP_MODE_DIFF_DELTA) - { - file->n_blocks = ft_div_i64u32_to_i32(file->read_size, BLCKSZ); - } - /* Determine that file didn`t changed in case of incremental backup */ if (backup_mode != BACKUP_MODE_FULL && file->exists_in_prev && @@ -1845,6 +1838,7 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, } file->read_size += BLCKSZ; } + file->n_blocks = $i(pioFinalPageN, pages); /* * Add dummy header, so we can later extract the length of last header @@ -1932,6 +1926,8 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, file->read_size += BLCKSZ; } + file->n_blocks = $i(pioFinalPageN, pages); + file->size = (int64_t)file->n_blocks * BLCKSZ; err = $i(pioTruncate, out, file->size); if ($haserr(err)) return $iresult(err); diff --git a/src/utils/file.c b/src/utils/file.c index b8ae6d618..bb82a50b9 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1872,6 +1872,7 @@ fio_iterate_pages_impl(pioDrive_i drive, int out, const char *path, pioPagesIterator_i pages; err_i err = $noerr(); fio_header hdr = {.cop=FIO_ITERATE_DATA}; + BlockNumber finalN; pages = $(pioIteratePages, drive.self, .path = path, @@ -1913,10 
+1914,17 @@ fio_iterate_pages_impl(pioDrive_i drive, int out, const char *path, IO_CHECK(fio_write_all(out, req.ptr, req.len), req.len); } - ft_strbuf_free(&req); - hdr = (fio_header){.cop = FIO_ITERATE_EOF}; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + ft_strbuf_reset_for_reuse(&req); + + finalN = $i(pioFinalPageN, pages); + hdr = (fio_header){.cop = FIO_ITERATE_EOF, .size = sizeof(finalN)}; + ft_strbuf_catbytes(&req, ft_bytes(&hdr, sizeof(hdr))); + ft_strbuf_catbytes(&req, ft_bytes(&finalN, sizeof(finalN))); + + IO_CHECK(fio_write_all(out, req.ptr, req.len), req.len); + + ft_strbuf_free(&req); } typedef struct send_file_state { @@ -5763,11 +5771,13 @@ pio_line_reader_getline(pio_line_reader *r, err_i *err) typedef struct pioRemotePagesIterator { bool valid; + BlockNumber n_blocks; } pioRemotePagesIterator; typedef struct pioLocalPagesIterator { BlockNumber blknum; + BlockNumber lastblkn; BlockNumber n_blocks; bool just_validate; @@ -5835,6 +5845,8 @@ pioRemotePagesIterator_pioNextPage(VSelf, PageIteratorValue *value) fio_header hdr; + value->compressed_size = 0; + if (!self->valid) { value->page_result = PageIsTruncated; return $noerr(); @@ -5847,7 +5859,9 @@ pioRemotePagesIterator_pioNextPage(VSelf, PageIteratorValue *value) } else if (hdr.cop == FIO_ITERATE_EOF) { + ft_assert(hdr.size == sizeof(BlockNumber)); self->valid = false; + IO_CHECK(fio_read_all(fio_stdin, &self->n_blocks, sizeof(self->n_blocks)), sizeof(self->n_blocks)); value->page_result = PageIsTruncated; return $noerr(); } @@ -5864,6 +5878,13 @@ pioRemotePagesIterator_pioNextPage(VSelf, PageIteratorValue *value) intCode(hdr.cop)); } +static BlockNumber +pioRemotePagesIterator_pioFinalPageN(VSelf) +{ + Self(pioRemotePagesIterator); + return self->n_blocks; +} + pioPagesIterator_i doIteratePages_impl(pioIteratePages_i drive, struct doIteratePages_params p) { @@ -5971,40 +5992,70 @@ pioLocalPagesIterator_pioNextPage(VSelf, PageIteratorValue *value) { FOBJ_FUNC_ARP(); 
Self(pioLocalPagesIterator); + char page_buf[BLCKSZ]; + BlockNumber blknum; + BlockNumber n_blocks; + int rc = PageIsOk; + blknum = self->blknum; value->compressed_size = 0; - while (self->blknum < self->n_blocks) - { - char page_buf[BLCKSZ]; - BlockNumber blknum = self->blknum; + if (self->blknum >= self->n_blocks) + goto truncated; - /* next block */ - if (self->map.bitmapsize && - !datapagemap_first(self->map, &blknum)) - { - self->blknum = self->n_blocks; - break; - } + /* next block */ + if (self->map.bitmapsize && + !datapagemap_first(self->map, &blknum)) + { + self->blknum = self->n_blocks; + goto truncated; + } - value->blknum = blknum; - self->blknum = blknum+1; + value->blknum = blknum; + self->blknum = blknum+1; - int rc = prepare_page(self, blknum, page_buf, &value->state); - value->page_result = rc; - if (rc == PageIsTruncated) - break; - if (rc == PageIsOk && !self->just_validate) - { - value->compressed_size = compress_page(value->compressed_page, BLCKSZ, - value->blknum, page_buf, self->calg, - self->clevel, self->from_fullpath); - } - return $noerr(); + rc = prepare_page(self, blknum, page_buf, &value->state); + value->page_result = rc; + if (rc == PageIsTruncated) + goto re_stat; + self->lastblkn = blknum+1; + if (rc == PageIsOk && !self->just_validate) + { + value->compressed_size = compress_page(value->compressed_page, BLCKSZ, + value->blknum, page_buf, self->calg, + self->clevel, self->from_fullpath); } + return $noerr(); + +re_stat: + { + /* + * prepare_page found file is shorter than expected. + * Lets re-investigate its length. 
+ */ + struct stat st; + int fd = fileno(self->in); + if (fstat(fd, &st) < 0) + return $syserr(errno, "Re-stat-ting file {path}", + path(self->from_fullpath)); + n_blocks = ft_div_i64u32_to_i32(st.st_size, BLCKSZ); + /* we should not "forget" already produced pages */ + if (n_blocks < self->lastblkn) + n_blocks = self->lastblkn; + if (n_blocks < self->n_blocks) + self->n_blocks = blknum; + } +truncated: value->page_result = PageIsTruncated; return $noerr(); } +static BlockNumber +pioLocalPagesIterator_pioFinalPageN(VSelf) +{ + Self(pioLocalPagesIterator); + return self->n_blocks; +} + /* * Retrieves a page taking the backup mode into account * and writes it into argument "page". Argument "page" diff --git a/src/utils/file.h b/src/utils/file.h index 47fa4c0d3..84d10a277 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -276,10 +276,12 @@ typedef struct } PageIteratorValue; #define mth__pioNextPage err_i, (PageIteratorValue *, value) +#define mth__pioFinalPageN BlockNumber fobj_method(pioNextPage); +fobj_method(pioFinalPageN); -#define iface__pioPagesIterator mth(pioNextPage) +#define iface__pioPagesIterator mth(pioNextPage, pioFinalPageN) fobj_iface(pioPagesIterator); From f5616ea04c310425e0c3f7c4150053897cabfbc9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Dec 2022 23:11:53 +0300 Subject: [PATCH 147/339] get rid of fio_pread btw, it is not accurate about error --- src/utils/file.c | 50 ++++++++---------------------------------------- src/utils/file.h | 1 - 2 files changed, 8 insertions(+), 43 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index bb82a50b9..1cd52c7b1 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -748,46 +748,6 @@ fio_truncate(int fd, off_t size) } } - -/* - * Read file from specified location. 
- */ -int -fio_pread(FILE* f, void* buf, off_t offs) -{ - if (fio_is_remote_file(f)) - { - int fd = fio_fileno(f); - fio_header hdr; - - hdr.cop = FIO_PREAD; - hdr.handle = fd & ~FIO_PIPE_MARKER; - hdr.size = 0; - hdr.arg = offs; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - Assert(hdr.cop == FIO_SEND); - if (hdr.size != 0) - IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - - /* TODO: error handling */ - - return hdr.arg; - } - else - { - /* For local file, opened by fopen, we should use stdio functions */ - int rc = fseek(f, offs, SEEK_SET); - - if (rc < 0) - return rc; - - return fread(buf, 1, BLCKSZ, f); - } -} - /* Set position in stdio file */ int fio_fseek(FILE* f, off_t offs) @@ -6093,8 +6053,14 @@ prepare_page(pioLocalPagesIterator *iter, BlockNumber blknum, Page page, PageSta */ while (!page_is_valid && try_again--) { - /* read the block */ - int read_len = fio_pread(iter->in, page, blknum * BLCKSZ); + int read_len = fseeko(iter->in, (off_t)blknum * BLCKSZ, SEEK_SET); + if (read_len == 0) /* seek is successful */ + { + /* read the block */ + read_len = fread(page, 1, BLCKSZ, iter->in); + if (read_len == 0 && ferror(iter->in)) + read_len = -1; + } /* The block could have been truncated. It is fine. */ if (read_len == 0) diff --git a/src/utils/file.h b/src/utils/file.h index 84d10a277..676017413 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -160,7 +160,6 @@ extern ssize_t fio_fwrite_async_compressed(FILE* f, void const* buf, size_t size extern size_t fio_fwrite_async(FILE* f, void const* buf, size_t size); extern int fio_check_error_file(FILE* f, char **errmsg); extern ssize_t fio_fread(FILE* f, void* buf, size_t size); -extern int fio_pread(FILE* f, void* buf, off_t offs); extern int fio_fprintf(FILE* f, const char* arg, ...) 
pg_attribute_printf(2, 3); extern int fio_fflush(FILE* f); extern int fio_fseek(FILE* f, off_t offs); From 2b387ed853325b0ebf1ce725fc9adcb6477590af Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Dec 2022 23:47:38 +0300 Subject: [PATCH 148/339] remove datapagemap_iterator and optimize datapatemap_first --- src/datapagemap.c | 64 ++++++----------------------------------------- src/datapagemap.h | 3 --- src/util.c | 8 ++---- 3 files changed, 10 insertions(+), 65 deletions(-) diff --git a/src/datapagemap.c b/src/datapagemap.c index 6c7096f37..15f70893c 100644 --- a/src/datapagemap.c +++ b/src/datapagemap.c @@ -14,12 +14,6 @@ #include "datapagemap.h" -struct datapagemap_iterator -{ - datapagemap_t map; - BlockNumber nextblkno; -}; - /***** * Public functions */ @@ -73,66 +67,24 @@ datapagemap_first(datapagemap_t map, BlockNumber *start_and_result) { int nextoff = blk / 8; int bitno = blk % 8; + unsigned char c; if (nextoff >= map.bitmapsize) break; - if (map.bitmap[nextoff] & (1 << bitno)) + c = map.bitmap[nextoff] >> bitno; + if (c == 0) + blk += 8 - bitno; + else if (c&1) { *start_and_result = blk; return true; } - blk++; + else + blk += ffs(c)-1; } /* no more set bits in this bitmap. */ *start_and_result = UINT32_MAX; return false; -} - -/* - * Start iterating through all entries in the page map. - * - * After datapagemap_iterate, call datapagemap_next to return the entries, - * until it returns false. After you're done, use pg_free() to destroy the - * iterator. 
- */ -datapagemap_iterator_t * -datapagemap_iterate(datapagemap_t *map) -{ - datapagemap_iterator_t *iter; - - iter = pg_malloc(sizeof(datapagemap_iterator_t)); - iter->map = *map; - iter->nextblkno = 0; - - return iter; -} - -bool -datapagemap_next(datapagemap_iterator_t *iter, BlockNumber *blkno) -{ - datapagemap_t *map = &iter->map; - - for (;;) - { - BlockNumber blk = iter->nextblkno; - int nextoff = blk / 8; - int bitno = blk % 8; - - if (nextoff >= map->bitmapsize) - break; - - iter->nextblkno++; - - if (map->bitmap[nextoff] & (1 << bitno)) - { - *blkno = blk; - return true; - } - } - - /* no more set bits in this bitmap. */ - return false; -} - +} \ No newline at end of file diff --git a/src/datapagemap.h b/src/datapagemap.h index cff243362..33000392c 100644 --- a/src/datapagemap.h +++ b/src/datapagemap.h @@ -25,11 +25,8 @@ struct datapagemap }; typedef struct datapagemap datapagemap_t; -typedef struct datapagemap_iterator datapagemap_iterator_t; extern void datapagemap_add(datapagemap_t *map, BlockNumber blkno); extern bool datapagemap_first(datapagemap_t map, BlockNumber *start_and_result); -extern datapagemap_iterator_t *datapagemap_iterate(datapagemap_t *map); -extern bool datapagemap_next(datapagemap_iterator_t *iter, BlockNumber *blkno); #endif /* DATAPAGEMAP_H */ diff --git a/src/util.c b/src/util.c index 5e9699a2f..2db650a8e 100644 --- a/src/util.c +++ b/src/util.c @@ -501,14 +501,10 @@ datapagemap_is_set(datapagemap_t *map, BlockNumber blkno) void datapagemap_print_debug(datapagemap_t *map) { - datapagemap_iterator_t *iter; - BlockNumber blocknum; + BlockNumber blocknum = 0; - iter = datapagemap_iterate(map); - while (datapagemap_next(iter, &blocknum)) + for (;datapagemap_first(*map, &blocknum); blocknum++) elog(VERBOSE, " block %u", blocknum); - - pg_free(iter); } const char* From 5dfb164d2a02de79e49bd6265998f43f8d5135f6 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 9 Dec 2022 00:55:26 +0300 Subject: [PATCH 149/339] send_pages: simplify 
headers handling --- src/data.c | 32 +++++++++++--------------------- 1 file changed, 11 insertions(+), 21 deletions(-) diff --git a/src/data.c b/src/data.c index 06e0433ae..fe052d017 100644 --- a/src/data.c +++ b/src/data.c @@ -1765,6 +1765,10 @@ open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 buf_size) return out; } +#define FT_SLICE bpph2 +#define FT_SLICE_TYPE BackupPageHeader2 +#include + /* backup local file */ static err_i send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, @@ -1779,8 +1783,7 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, pioFile_i out = $null(pioFile); pioWriteFlush_i wrapped = $null(pioWriteFlush); pioCRC32Counter *crc32 = NULL; - BackupPageHeader2 *header = NULL; - parray *harray = NULL; + ft_arr_bpph2_t harray = ft_arr_init(); err_i err = $noerr(); pages = doIteratePages(db_location, .from_fullpath = from_fullpath, .file = file, @@ -1790,7 +1793,6 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, if ($haserr(err)) return $iresult(err); - harray = parray_new(); while (true) { PageIteratorValue value; @@ -1813,14 +1815,12 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, file->compress_alg = calg; } - header = pgut_new0(BackupPageHeader2); - *header = (BackupPageHeader2){ + ft_arr_bpph2_push(&harray, (BackupPageHeader2){ .block = value.blknum, .pos = file->write_size, .lsn = value.state.lsn, .checksum = value.state.checksum, - }; - parray_append(harray, header); + }); file->uncompressed_size += BLCKSZ; file->write_size += backup_page($reduce(pioWrite, wrapped), value.blknum, @@ -1844,22 +1844,12 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, * Add dummy header, so we can later extract the length of last header * as difference between their offsets. 
*/ - if (parray_num(harray) > 0) + if (harray.len > 0) { - size_t hdr_num = parray_num(harray); - size_t i; - - file->n_headers = (int) hdr_num; /* is it valid? */ - *headers = (BackupPageHeader2 *) pgut_malloc0((hdr_num + 1) * sizeof(BackupPageHeader2)); - for (i = 0; i < hdr_num; i++) - { - header = (BackupPageHeader2 *)parray_get(harray, i); - (*headers)[i] = *header; - pg_free(header); - } - (*headers)[hdr_num] = (BackupPageHeader2){.pos=file->write_size}; + file->n_headers = harray.len; + ft_arr_bpph2_push(&harray, (BackupPageHeader2){.pos=file->write_size}); + *headers = harray.ptr; } - parray_free(harray); /* close local output file */ if ($notNULL(out)) From e3c322a926706eba7c312ee3f116d1f9968acfdc Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 9 Dec 2022 10:08:57 +0300 Subject: [PATCH 150/339] buffer for pioLocalPagesIterator --- src/pg_probackup.h | 2 ++ src/utils/file.c | 13 ++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 4a7f06c33..ca2e6e2e6 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -105,7 +105,9 @@ extern const char *PROGRAM_EMAIL; #define STDIO_BUFSIZE 65536 #define ERRMSG_MAX_LEN 2048 +#define SMALL_CHUNK_SIZE (32 * 1024) #define CHUNK_SIZE (128 * 1024) +#define MEDIUM_CHUNK_SIZE (512 * 1024) #define LARGE_CHUNK_SIZE (4 * 1024 * 1024) #define IN_BUF_SIZE (512 * 1024) #define OUT_BUF_SIZE (512 * 1024) diff --git a/src/utils/file.c b/src/utils/file.c index 1cd52c7b1..72807dcda 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5744,6 +5744,7 @@ typedef struct pioLocalPagesIterator int segno; datapagemap_t map; FILE *in; + void *buf; const char *from_fullpath; /* prev_backup_start_lsn */ XLogRecPtr start_lsn; @@ -5893,6 +5894,8 @@ pioLocalDrive_pioIteratePages(VSelf, path_t path, fobj_t iter = {0}; BlockNumber n_blocks; FILE *in; + void *buf; + size_t bufsz; int fd; struct stat st; @@ -5909,9 +5912,15 @@ pioLocalDrive_pioIteratePages(VSelf, path_t 
path, fd = fileno(in); if (fstat(fd, &st) == -1) { + fclose(in); *err = $syserr(errno, "Cannot stat datafile"); + return $null(pioPagesIterator); } + bufsz = pagemap.bitmapsize > 0 ? SMALL_CHUNK_SIZE : MEDIUM_CHUNK_SIZE; + buf = ft_malloc(bufsz); + setvbuf(in, buf, _IOFBF, bufsz); + /* * Compute expected number of blocks in the file. * NOTE This is a normal situation, if the file size has changed @@ -5926,6 +5935,7 @@ pioLocalDrive_pioIteratePages(VSelf, path_t path, .from_fullpath = path, .map = pagemap, .in = in, + .buf = buf, .start_lsn = start_lsn, .calg = calg, .clevel = clevel, @@ -5939,7 +5949,8 @@ pioLocalPagesIterator_fobjDispose(VSelf) { Self(pioLocalPagesIterator); - if(self->in) fclose(self->in); + if (self->buf) ft_free(self->buf); + if (self->in) fclose(self->in); } static int32 prepare_page(pioLocalPagesIterator *iter, From 6898075741b852be3512f22cd95fe5de2671f6ec Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 9 Dec 2022 10:15:13 +0300 Subject: [PATCH 151/339] remove some already unused functions --- src/utils/file.c | 107 ----------------------------------------------- src/utils/file.h | 6 --- 2 files changed, 113 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 72807dcda..051247e86 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -609,33 +609,6 @@ fio_fopen(fio_location location, const char* path, const char* mode) return f; } -/* Format output to file stream */ -int -fio_fprintf(FILE* f, const char* format, ...) 
-{ - int rc; - va_list args; - va_start (args, format); - if (fio_is_remote_file(f)) - { - char buf[PRINTF_BUF_SIZE]; -#ifdef HAS_VSNPRINTF - rc = vsnprintf(buf, sizeof(buf), format, args); -#else - rc = vsprintf(buf, format, args); -#endif - if (rc > 0) { - fio_fwrite(f, buf, rc); - } - } - else - { - rc = vfprintf(f, format, args); - } - va_end (args); - return rc; -} - /* Flush stream data (does nothing for remote file) */ int fio_fflush(FILE* f) @@ -646,13 +619,6 @@ fio_fflush(FILE* f) return rc; } -/* Sync file to the disk (does nothing for remote file) */ -int -fio_flush(int fd) -{ - return fio_is_remote_fd(fd) ? 0 : fsync(fd); -} - /* Close output stream */ int fio_fclose(FILE* f) @@ -800,16 +766,6 @@ fio_seek_impl(int fd, off_t offs) } } -/* Write data to stdio file */ -size_t -fio_fwrite(FILE* f, void const* buf, size_t size) -{ - if (fio_is_remote_file(f)) - return fio_write(fio_fileno(f), buf, size); - else - return fwrite(buf, 1, size, f); -} - /* * Write buffer to descriptor by calling write(), * If size of written data is less than buffer size, @@ -1050,32 +1006,6 @@ fio_check_error_file(FILE* f, char **errmsg) return 0; } -/* check if remote agent encountered any error during execution of async operations */ -int -fio_check_error_fd(int fd, char **errmsg) -{ - if (fio_is_remote_fd(fd)) - { - fio_header hdr; - - hdr.cop = FIO_GET_ASYNC_ERROR; - hdr.size = 0; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - - /* check results */ - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (hdr.size > 0) - { - *errmsg = pgut_malloc(ERRMSG_MAX_LEN); - IO_CHECK(fio_read_all(fio_stdin, *errmsg, hdr.size), hdr.size); - return 1; - } - } - return 0; -} - static void fio_get_async_error_impl(int out) { @@ -1105,17 +1035,6 @@ fio_get_async_error_impl(int out) } } -/* Read data from stdio file */ -ssize_t -fio_fread(FILE* f, void* buf, size_t size) -{ - size_t rc; - if (fio_is_remote_file(f)) - return 
fio_read(fio_fileno(f), buf, size); - rc = fread(buf, 1, size, f); - return rc == 0 && !feof(f) ? -1 : rc; -} - /* Read data from file */ ssize_t fio_read(int fd, void* buf, size_t size) @@ -1584,32 +1503,6 @@ typedef struct fioGZFile Bytef buf[ZLIB_BUFFER_SIZE]; } fioGZFile; -/* check if remote agent encountered any error during execution of async operations */ -int -fio_check_error_fd_gz(gzFile f, char **errmsg) -{ - if (f && ((size_t)f & FIO_GZ_REMOTE_MARKER)) - { - fio_header hdr; - - hdr.cop = FIO_GET_ASYNC_ERROR; - hdr.size = 0; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - - /* check results */ - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (hdr.size > 0) - { - *errmsg = pgut_malloc(ERRMSG_MAX_LEN); - IO_CHECK(fio_read_all(fio_stdin, *errmsg, hdr.size), hdr.size); - return 1; - } - } - return 0; -} - /* On error returns NULL and errno should be checked */ gzFile fio_gzopen(fio_location location, const char* path, const char* mode, int level) diff --git a/src/utils/file.h b/src/utils/file.h index 676017413..cd73a4d2e 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -145,22 +145,16 @@ extern void fio_get_agent_version(int* protocol, char* payload_buf, size_t pa extern int fio_open(fio_location location, const char* name, int mode); extern ssize_t fio_write(int fd, void const* buf, size_t size); extern ssize_t fio_write_async(int fd, void const* buf, size_t size); -extern int fio_check_error_fd(int fd, char **errmsg); -extern int fio_check_error_fd_gz(gzFile f, char **errmsg); extern ssize_t fio_read(int fd, void* buf, size_t size); -extern int fio_flush(int fd); extern int fio_seek(int fd, off_t offs); extern int fio_truncate(int fd, off_t size); extern int fio_close(int fd); /* FILE-style functions */ extern FILE* fio_fopen(fio_location location, const char* name, const char* mode); -extern size_t fio_fwrite(FILE* f, void const* buf, size_t size); extern ssize_t fio_fwrite_async_compressed(FILE* 
f, void const* buf, size_t size, int compress_alg); extern size_t fio_fwrite_async(FILE* f, void const* buf, size_t size); extern int fio_check_error_file(FILE* f, char **errmsg); -extern ssize_t fio_fread(FILE* f, void* buf, size_t size); -extern int fio_fprintf(FILE* f, const char* arg, ...) pg_attribute_printf(2, 3); extern int fio_fflush(FILE* f); extern int fio_fseek(FILE* f, off_t offs); extern int fio_ftruncate(FILE* f, off_t size); From 36ff719717ec7978c73bdb0e241fe513e5a9a2db Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 9 Dec 2022 10:21:42 +0300 Subject: [PATCH 152/339] and another one unused function --- src/utils/file.c | 35 ----------------------------------- src/utils/file.h | 1 - 2 files changed, 36 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 051247e86..d3678e95a 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -794,41 +794,6 @@ durable_write(int fd, const char* buf, size_t size) return size; } -/* Write data to the file synchronously */ -ssize_t -fio_write(int fd, void const* buf, size_t size) -{ - if (fio_is_remote_fd(fd)) - { - fio_header hdr = { - .cop = FIO_WRITE, - .handle = fd & ~FIO_PIPE_MARKER, - .size = size, - .arg = 0, - }; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, buf, size), size); - - /* check results */ - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - Assert(hdr.cop == FIO_WRITE); - - /* set errno */ - if (hdr.arg > 0) - { - errno = hdr.arg; - return -1; - } - - return size; - } - else - { - return durable_write(fd, buf, size); - } -} - static void fio_write_impl(int fd, void const* buf, size_t size, int out) { diff --git a/src/utils/file.h b/src/utils/file.h index cd73a4d2e..a20638813 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -143,7 +143,6 @@ extern void fio_get_agent_version(int* protocol, char* payload_buf, size_t pa /* fd-style functions */ extern int fio_open(fio_location location, const 
char* name, int mode); -extern ssize_t fio_write(int fd, void const* buf, size_t size); extern ssize_t fio_write_async(int fd, void const* buf, size_t size); extern ssize_t fio_read(int fd, void* buf, size_t size); extern int fio_seek(int fd, off_t offs); From 77b47e20d7e3624246cdc4ee483cad8c5753b349 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 10 Dec 2022 16:29:30 +0300 Subject: [PATCH 153/339] remove fetch.c because functions defined here are unused --- Makefile | 2 +- src/fetch.c | 113 --------------------------------------------- src/pg_probackup.h | 8 ---- 3 files changed, 1 insertion(+), 122 deletions(-) delete mode 100644 src/fetch.c diff --git a/Makefile b/Makefile index 16b645c69..e34d49934 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ PROGRAM := pg_probackup OBJS := src/utils/configuration.o src/utils/json.o src/utils/logger.o \ src/utils/parray.o src/utils/pgut.o src/utils/thread.o src/utils/remote.o src/utils/file.o OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o src/data.o \ - src/delete.o src/dir.o src/fetch.o src/help.o src/init.o src/merge.o \ + src/delete.o src/dir.o src/help.o src/init.o src/merge.o \ src/parsexlog.o src/ptrack.o src/pg_probackup.o src/restore.o src/show.o src/stream.o \ src/util.o src/validate.o src/datapagemap.o src/catchup.o \ src/compatibility/pg-11.o src/utils/simple_prompt.o diff --git a/src/fetch.c b/src/fetch.c deleted file mode 100644 index d283af129..000000000 --- a/src/fetch.c +++ /dev/null @@ -1,113 +0,0 @@ -/*------------------------------------------------------------------------- - * - * fetch.c - * Functions for fetching files from PostgreSQL data directory - * - * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group - * - *------------------------------------------------------------------------- - */ - -#include "pg_probackup.h" - -#include - -/* - * Read a file into memory. The file to be read is /. 
- * The file contents are returned in a malloc'd buffer, and *filesize - * is set to the length of the file. - * - * The returned buffer is always zero-terminated; the size of the returned - * buffer is actually *filesize + 1. That's handy when reading a text file. - * This function can be used to read binary files as well, you can just - * ignore the zero-terminator in that case. - * - */ -char * -slurpFile(fio_location location, const char *datadir, const char *path, size_t *filesize, bool safe) -{ - int fd; - char *buffer; - pio_stat_t statbuf; - char fullpath[MAXPGPATH]; - size_t len; - err_i err; - - join_path_components(fullpath, datadir, path); - - if ((fd = fio_open(location, fullpath, O_RDONLY | PG_BINARY)) == -1) - { - if (safe) - return NULL; - else - elog(ERROR, "Could not open file \"%s\" for reading: %s", - fullpath, strerror(errno)); - } - - statbuf = $i(pioStat, pioDriveForLocation(location), .path = fullpath, - .follow_symlink = true, .err = &err); - if ($haserr(err)) - { - if (safe) - return NULL; - else - ft_logerr(FT_FATAL, $errmsg(err), "slurpFile"); - } - - if (statbuf.pst_size > SSIZE_MAX) - ft_log(FT_FATAL, "file \"%s\" is too large: %lld", - fullpath, (long long)statbuf.pst_size); - - len = statbuf.pst_size; - buffer = pg_malloc(len + 1); - - if (fio_read(fd, buffer, len) != len) - { - if (safe) - return NULL; - else - elog(ERROR, "Could not read file \"%s\": %s\n", - fullpath, strerror(errno)); - } - - fio_close(fd); - - /* Zero-terminate the buffer. */ - buffer[len] = '\0'; - - if (filesize) - *filesize = len; - return buffer; -} - -/* - * Receive a single file as a malloc'd buffer. 
- */ -char * -fetchFile(PGconn *conn, const char *filename, size_t *filesize) -{ - PGresult *res; - char *result; - const char *params[1]; - int len; - - params[0] = filename; - res = pgut_execute_extended(conn, "SELECT pg_catalog.pg_read_binary_file($1)", - 1, params, false, false); - - /* sanity check the result set */ - if (PQntuples(res) != 1 || PQgetisnull(res, 0, 0)) - elog(ERROR, "unexpected result set while fetching remote file \"%s\"", - filename); - - /* Read result to local variables */ - len = PQgetlength(res, 0, 0); - result = pg_malloc(len + 1); - memcpy(result, PQgetvalue(res, 0, 0), len); - result[len] = '\0'; - - PQclear(res); - *filesize = len; - - return result; -} diff --git a/src/pg_probackup.h b/src/pg_probackup.h index ca2e6e2e6..9a92d61d0 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -862,14 +862,6 @@ extern int do_delete_instance(InstanceState *instanceState); extern void do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, const char *status); -/* in fetch.c */ -extern char *slurpFile(fio_location location, - const char *datadir, - const char *path, - size_t *filesize, - bool safe); -extern char *fetchFile(PGconn *conn, const char *filename, size_t *filesize); - /* in help.c */ extern void help_print_version(void); extern void help_pg_probackup(void); From 52a42a856be1263aacd4635c9558607e969f2993 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Thu, 8 Dec 2022 12:51:32 +0300 Subject: [PATCH 154/339] =?UTF-8?q?PBCKP-390=20=D0=9E=D1=81=D0=B2=D0=BE?= =?UTF-8?q?=D0=B1=D0=BE=D0=B6=D0=B4=D0=B0=D1=82=D1=8C=20PGresult=20=D0=B2?= =?UTF-8?q?=20get=5Fdatabase=5Fmap?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/backup.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/backup.c b/src/backup.c index 90bfb439f..2068f0634 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1181,6 +1181,8 @@ get_database_map(PGconn *conn) parray_append(database_map, 
db_entry); } + PQclear(res); + return database_map; } From 3d7d28ecd1ac0315b95b71b954c33c82ff3ae3f6 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Mon, 5 Dec 2022 20:29:52 +0300 Subject: [PATCH 155/339] [PBCKP-373] Got rid of the fio_access function fio_access calls have either been removed or replaced by pioExists --- src/archive.c | 2 +- src/backup.c | 8 ++++++-- src/catchup.c | 3 ++- src/configure.c | 8 -------- src/dir.c | 3 ++- src/restore.c | 5 ++++- src/utils/file.c | 38 +++----------------------------------- src/utils/file.h | 5 +++-- 8 files changed, 21 insertions(+), 51 deletions(-) diff --git a/src/archive.c b/src/archive.c index 1c1b5e419..3abecfbd0 100644 --- a/src/archive.c +++ b/src/archive.c @@ -504,7 +504,7 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, part_opened: elog(LOG, "Temp WAL file successfully created: \"%s\"", to_fullpath_part); - if ($i(pioExists, backup_drive, to_fullpath, &err)) + if ($i(pioExists, backup_drive, .path = to_fullpath, .err = &err)) { pg_crc32 crc32_src; pg_crc32 crc32_dst; diff --git a/src/backup.c b/src/backup.c index 2068f0634..91fdc4a1b 100644 --- a/src/backup.c +++ b/src/backup.c @@ -231,9 +231,13 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, if (current.backup_mode == BACKUP_MODE_DIFF_PAGE || !current.stream) { /* Check that archive_dir can be reached */ - if (fio_access(FIO_BACKUP_HOST, instanceState->instance_wal_subdir_path, F_OK) != 0) + err_i err = $noerr(); + + if (!$i(pioExists, current.backup_location, .path = instanceState->instance_wal_subdir_path, + .expected_kind = PIO_KIND_DIRECTORY, .err = &err)) elog(ERROR, "WAL archive directory is not accessible \"%s\": %s", - instanceState->instance_wal_subdir_path, strerror(errno)); + instanceState->instance_wal_subdir_path, + $haserr(err) ? $errmsg(err) : "no such file or directory"); /* * Do not wait start_lsn for stream backup. 
diff --git a/src/catchup.c b/src/catchup.c index 8ad187606..affc40578 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -144,9 +144,10 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, if (current.backup_mode != BACKUP_MODE_FULL) { char backup_label_filename[MAXPGPATH]; + err_i err = $noerr(); join_path_components(backup_label_filename, dest_pgdata, PG_BACKUP_LABEL_FILE); - if (fio_access(FIO_LOCAL_HOST, backup_label_filename, F_OK) == 0) + if($i(pioExists, current.backup_location, .path = backup_label_filename, .err = &err)) elog(ERROR, "Destination directory contains \"" PG_BACKUP_LABEL_FILE "\" file"); } diff --git a/src/configure.c b/src/configure.c index e6af22f6d..5a181f2ae 100644 --- a/src/configure.c +++ b/src/configure.c @@ -605,14 +605,6 @@ readInstanceConfigFile(InstanceState *instanceState) init_config(instance, instanceState->instance_name); - - if (fio_access(FIO_BACKUP_HOST, instanceState->instance_config_path, F_OK) != 0) - { - elog(WARNING, "Control file \"%s\" doesn't exist", instanceState->instance_config_path); - pfree(instance); - return NULL; - } - parsed_options = config_read_opt(instanceState->backup_location, instanceState->instance_config_path, instance_options, WARNING, true, true); diff --git a/src/dir.c b/src/dir.c index 48e85ba8c..62deaa03b 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1362,7 +1362,8 @@ fileExists(const char *path, fio_location location) err_i err; bool exists; - exists = $i(pioExists, pioDriveForLocation(location), path, &err); + exists = $i(pioExists, pioDriveForLocation(location), .path = path, + .err = &err); return exists; } diff --git a/src/restore.c b/src/restore.c index 21ea11f64..c9fb71fcc 100644 --- a/src/restore.c +++ b/src/restore.c @@ -2162,8 +2162,11 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, */ if (incremental_mode == INCR_LSN) { + err_i err = $noerr(); + pioDrive_i drive = pioDriveForLocation(FIO_DB_HOST); + 
join_path_components(backup_label, pgdata, "backup_label"); - if (fio_access(FIO_DB_HOST, backup_label, F_OK) == 0) + if($i(pioExists, drive, .path = backup_label, .err = &err)) { elog(WARNING, "Destination directory contains \"backup_control\" file. " "This does NOT mean that you should delete this file and retry, only that " diff --git a/src/utils/file.c b/src/utils/file.c index d3678e95a..ae47293cf 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1068,38 +1068,6 @@ fio_readlink(fio_location location, const char *path, char *value, size_t valsiz } } -/* Check presence of the file */ -int -fio_access(fio_location location, const char* path, int mode) -{ - if (fio_is_remote(location)) - { - fio_header hdr = { - .cop = FIO_ACCESS, - .handle = -1, - .size = strlen(path) + 1, - .arg = mode, - }; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); - - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - Assert(hdr.cop == FIO_ACCESS); - - if (hdr.arg != 0) - { - errno = hdr.arg; - return -1; - } - return 0; - } - else - { - return access(path, mode); - } -} - /* Create symbolic link */ int fio_symlink(fio_location location, const char* target, const char* link_path, bool overwrite) @@ -3485,7 +3453,7 @@ pioFile_fobjDispose(VSelf) } static bool -common_pioExists(fobj_t self, path_t path, err_i *err) +common_pioExists(fobj_t self, path_t path, pio_file_kind_e expected_kind, err_i *err) { pio_stat_t buf; fobj_reset_err(err); @@ -3497,8 +3465,8 @@ common_pioExists(fobj_t self, path_t path, err_i *err) *err = $noerr(); return false; } - if ($noerr(*err) && buf.pst_kind != PIO_KIND_REGULAR) - *err = $err(SysErr, "File {path:q} is not regular", path(path)); + if ($noerr(*err) && buf.pst_kind != expected_kind) + *err = $err(SysErr, "File {path:q} is not of an expected kind", path(path)); if ($haserr(*err)) { *err = $syserr(getErrno(*err), "Could not check file 
existance: {cause:$M}", cause((*err).self)); diff --git a/src/utils/file.h b/src/utils/file.h index a20638813..ca99a2120 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -186,7 +186,6 @@ extern int fio_rename(fio_location location, const char* old_path, const cha extern int fio_symlink(fio_location location, const char* target, const char* link_path, bool overwrite); extern int fio_remove(fio_location location, const char* path, bool missing_ok); extern int fio_chmod(fio_location location, const char* path, int mode); -extern int fio_access(fio_location location, const char* path, int mode); extern ssize_t fio_readlink(fio_location location, const char *path, char *value, size_t valsiz); extern pid_t fio_check_postmaster(fio_location location, const char *pgdata); @@ -285,7 +284,9 @@ fobj_iface(pioPagesIterator); (err_i *, err) #define mth__pioRemove err_i, (path_t, path), (bool, missing_ok) #define mth__pioRename err_i, (path_t, old_path), (path_t, new_path) -#define mth__pioExists bool, (path_t, path), (err_i *, err) +#define mth__pioExists bool, (path_t, path), (pio_file_kind_e, expected_kind), \ + (err_i *, err) +#define mth__pioExists__optional() (expected_kind, PIO_KIND_REGULAR) #define mth__pioGetCRC32 pg_crc32, (path_t, path), (bool, compressed), \ (err_i *, err) /* Compare, that filename1 and filename2 is the same file */ From e683cf8a5cc512a38d148599feb3b5f8ea6db19e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 12 Dec 2022 12:49:20 +0300 Subject: [PATCH 156/339] fix compilation for < PG12 --- src/restore.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/restore.c b/src/restore.c index c9fb71fcc..25d227c2e 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1518,7 +1518,7 @@ update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backu join_path_components(path, instance_config.pgdata, "recovery.conf"); - err = $i(pioWriteFile, backup->database_location, .path = path, + err = $i(pioWriteFile, 
instanceState->database_location, .path = path, .content = ft_str2bytes(ft_strbuf_ref(&buf)), .binary = false); if ($haserr(err)) From 38f59c6567c025de74afe78007e30aef8bf15f37 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Dec 2022 16:19:56 +0300 Subject: [PATCH 157/339] fix pioWriteFilter_pioWriteFinish --- src/utils/file.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/utils/file.c b/src/utils/file.c index ae47293cf..664a9c15b 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5033,7 +5033,8 @@ pioWriteFilter_pioWriteFinish(VSelf) return err; ft_assert(r == wbuf.len); } - return err; + + return $i(pioWriteFinish, self->wrapped); } static err_i From 1d2faa383bd5f2c5b432604852100f0fbdbdcd1b Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Dec 2022 16:55:59 +0300 Subject: [PATCH 158/339] fo_obj: hide warnings in $iref/$iunref --- src/fu_util/impl/fo_impl.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/fu_util/impl/fo_impl.h b/src/fu_util/impl/fo_impl.h index dc51b0f9b..b147d37a1 100644 --- a/src/fu_util/impl/fo_impl.h +++ b/src/fu_util/impl/fo_impl.h @@ -524,8 +524,8 @@ extern void fobj__validate_args(fobj_method_handle_t meth, fobj_t self, #define fobj__del_impl(ptr) fobj_del((void**)(ptr)) #endif -#define fobj__iref(iface) ((__typeof(iface)){.self=fobj_ref((iface).self)}) -#define fobj__iunref(iface) ((__typeof(iface)){.self=fobj_unref((iface).self)}) +#define fobj__iref(iface) ({__typeof(iface) t = (iface); t.self=fobj_ref(t.self); t;}) +#define fobj__iunref(iface) ({__typeof(iface) t = (iface); t.self=fobj_unref(t.self); t;}) #ifndef NDEBUG #define fobj__iset(ptr, iface) do { \ __typeof(*(ptr)) fm_uniq(_validate_ptr_) ft_unused = (__typeof(iface)){}; \ From 7866628420ef160ff617f472200c05524ceceb07 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Dec 2022 19:12:36 +0300 Subject: [PATCH 159/339] fo_obj.h: improve debug-ability by making method entry function static --- 
src/fu_util/impl/fo_impl.h | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/src/fu_util/impl/fo_impl.h b/src/fu_util/impl/fo_impl.h index b147d37a1..3cb1b2edf 100644 --- a/src/fu_util/impl/fo_impl.h +++ b/src/fu_util/impl/fo_impl.h @@ -61,6 +61,7 @@ typedef struct { #define fobj__nm_kls(klass) kls__##klass #define fobj__nm_iface(iface) iface__##iface #define fobj__nm_mhandle(meth) meth##__mh +#define fobj__nm_do(meth) meth##__do #define fobj__nm_params_t(meth) meth##__params_t #define fobj__nm_invoke(meth) meth##__invoke #define fobj__nm_impl_t(meth) meth##__impl @@ -97,6 +98,7 @@ typedef struct { fobj__method_declare_impl(meth, \ fobj__nm_mhandle(meth), \ fobj__nm_params_t(meth), \ + fobj__nm_do(meth), \ fobj__nm_invoke(meth), \ fobj__nm_impl_t(meth), \ fobj__nm_cb(meth), \ @@ -127,7 +129,7 @@ typedef struct { #define fobj__method_declare_impl(meth, handle, \ params_t, \ - invoke_methparams, \ + meth_do, invoke_methparams, \ impl_meth_t, \ cb_meth, cb_meth_t, \ register_meth, wrap_decl, \ @@ -154,28 +156,33 @@ typedef struct { return (cb_meth_t){fnd.self, fnd.impl}; \ } \ \ - ft_inline ft_always_inline res \ - meth(fobj_t self comma() fobj__mapArgs_toArgs(__VA_ARGS__)) { \ - cb_meth_t cb = cb_meth(self, fobj_self_klass, true); \ + ft_static res \ + meth_do(fobj_t self, fobj_klass_handle_t parent comma() fobj__mapArgs_toArgs(__VA_ARGS__)) { \ + cb_meth_t cb = cb_meth(self, parent, true); \ return cb.impl(cb.self comma() fobj__mapArgs_toNames(__VA_ARGS__)); \ } \ \ ft_inline ft_always_inline res \ - invoke_methparams(cb_meth_t cb, params_t params) {\ + meth(fobj_t self comma() fobj__mapArgs_toArgs(__VA_ARGS__)) { \ + return meth_do(self, fobj_self_klass comma() fobj__mapArgs_toNames(__VA_ARGS__)); \ + } \ + \ + ft_inline ft_always_inline res \ + invoke_methparams(fobj_t self, fobj_klass_handle_t parent, params_t params) {\ fobj__params_defaults(meth); \ fm_when_isnt_empty(__VA_ARGS__)( \ if 
(ft_unlikely(!(fobj__assertArgsAnd(__VA_ARGS__)))) { \ const char * const params_s[] = { fobj__mapArgs_toNameStrs(__VA_ARGS__) }; \ char set[] = {fobj__assertArgsVals(__VA_ARGS__)}; \ - fobj__validate_args(handle(), cb.self, params_s, set, ft_arrsz(set)); \ + fobj__validate_args(handle(), self, params_s, set, ft_arrsz(set)); \ } ) \ - return cb.impl(cb.self comma() fobj__mapArgs_toNamedParams(__VA_ARGS__)); \ + return meth_do(self, parent comma() fobj__mapArgs_toNamedParams(__VA_ARGS__)); \ } \ #define fobj__method_common(meth, handle, impl_meth_t, register_meth, \ wrap_decl, comma, res, ...) \ \ - ft_static ft_gcc_const fobj_method_handle_t handle(void) { \ + ft_inline ft_gcc_const fobj_method_handle_t handle(void) { \ static volatile fobj_method_handle_t hndl = 0; \ fobj_method_handle_t h = hndl; \ if (ft_likely(h)) return h; \ @@ -427,10 +434,10 @@ typedef struct { /* Method invocation */ #define fobj_call(meth, self, ...) \ - fobj__nm_invoke(meth)(fobj__nm_cb(meth)(self, fobj_self_klass, true), fobj_pass_params(meth, __VA_ARGS__)) + fobj__nm_invoke(meth)(self, fobj_self_klass, fobj_pass_params(meth, __VA_ARGS__)) #define fobj_call_super(meth, _klassh, self, ...) \ - fobj__nm_invoke(meth)(fobj__nm_cb(meth)(self, _klassh, true), fobj_pass_params(meth, __VA_ARGS__)) + fobj__nm_invoke(meth)(self, _klassh, true), fobj_pass_params(meth, __VA_ARGS__)) #define fobj_iface_call(meth, iface, ...) \ fobj_call(meth, (fobj_t)(iface).fobj__nm_has(meth), __VA_ARGS__) @@ -446,13 +453,14 @@ typedef struct { #define fobj_ifdef(assignment, meth, self, ...) \ fobj__ifdef_impl(assignment, meth, (self), \ - fm_uniq(cb), fobj__nm_cb(meth), fobj__nm_cb_t(meth), \ + fm_uniq(cb), fm_uniq(_self), fobj__nm_cb(meth), fobj__nm_cb_t(meth), \ fobj__nm_invoke(meth), __VA_ARGS__) -#define fobj__ifdef_impl(assignment, meth, self_, cb, cb_meth, cb_meth_t, \ - invoke_meth__params, ...) 
({ \ - cb_meth_t cb = cb_meth(self_, fobj_self_klass, false); \ +#define fobj__ifdef_impl(assignment, meth, self_, cb, self, cb_meth, cb_meth_t, \ + invoke_meth__params, ...) ({ \ + fobj_t self = (self_); \ + cb_meth_t cb = cb_meth(self, fobj_self_klass, false); \ if (cb.impl != NULL) { \ - assignment invoke_meth__params(cb, fobj_pass_params(meth, __VA_ARGS__)); \ + assignment invoke_meth__params(self, fobj_self_klass, fobj_pass_params(meth, __VA_ARGS__)); \ } \ cb.impl != NULL; \ }) From 698824e58111a9d015c4ff9851276bf498896363 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 06:20:19 +0300 Subject: [PATCH 160/339] file.c: regularize remote handle handling --- src/utils/file.c | 86 ++++++++++++++++++++++++++---------------------- 1 file changed, 47 insertions(+), 39 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 664a9c15b..f74c93c26 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -13,7 +13,7 @@ #define PRINTF_BUF_SIZE 1024 -static __thread unsigned long fio_fdset = 0; +static __thread uint64_t fio_fdset = 0; static __thread int fio_stdout = 0; static __thread int fio_stdin = 0; static __thread int fio_stderr = 0; @@ -237,6 +237,31 @@ fio_is_remote_simple(fio_location location) return is_remote; } +static int +find_free_handle(void) +{ + uint64_t m = fio_fdset; + int i; + for (i = 0; (m & 1); i++, m>>=1) {} + if (i == FIO_FDMAX) { + elog(ERROR, "Descriptor pool for remote files is exhausted, " + "probably too many remote directories are opened"); + } + return i; +} + +static void +set_handle(int i) +{ + fio_fdset |= 1 << i; +} + +static void +unset_handle(int i) +{ + fio_fdset &= ~(1 << i); +} + /* Try to read specified amount of bytes unless error or EOF are encountered */ static ssize_t fio_read_all(int fd, void* buf, size_t size) @@ -410,20 +435,14 @@ fio_opendir(fio_location location, const char* path) DIR* dir; if (fio_is_remote(location)) { - int i; + int handle; fio_header hdr; - unsigned long mask; - mask = 
fio_fdset; - for (i = 0; (mask & 1) != 0; i++, mask >>= 1); - if (i == FIO_FDMAX) { - elog(ERROR, "Descriptor pool for remote files is exhausted, " - "probably too many remote directories are opened"); - } + handle = find_free_handle(); hdr.cop = FIO_OPENDIR; - hdr.handle = i; + hdr.handle = handle; hdr.size = strlen(path) + 1; - fio_fdset |= 1 << i; + set_handle(handle); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); @@ -433,10 +452,10 @@ fio_opendir(fio_location location, const char* path) if (hdr.arg != 0) { errno = hdr.arg; - fio_fdset &= ~(1 << hdr.handle); + unset_handle(hdr.handle); return NULL; } - dir = (DIR*)(size_t)(i + 1); + dir = (DIR*)(size_t)(handle + 1); } else { @@ -484,7 +503,7 @@ fio_closedir(DIR *dir) hdr.cop = FIO_CLOSEDIR; hdr.handle = (size_t)dir - 1; hdr.size = 0; - fio_fdset &= ~(1 << hdr.handle); + unset_handle(hdr.handle); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); return 0; @@ -502,24 +521,18 @@ fio_open(fio_location location, const char* path, int mode) int fd; if (fio_is_remote(location)) { - int i; + int handle; fio_header hdr; - unsigned long mask; - - mask = fio_fdset; - for (i = 0; (mask & 1) != 0; i++, mask >>= 1); - if (i == FIO_FDMAX) - elog(ERROR, "Descriptor pool for remote files is exhausted, " - "probably too many remote files are opened"); + handle = find_free_handle(); hdr.cop = FIO_OPEN; - hdr.handle = i; + hdr.handle = handle; hdr.size = strlen(path) + 1; hdr.arg = mode; // hdr.arg = mode & ~O_EXCL; // elog(INFO, "PATH: %s MODE: %i, %i", path, mode, O_EXCL); // elog(INFO, "MODE: %i", hdr.arg); - fio_fdset |= 1 << i; + set_handle(handle); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); @@ -530,10 +543,10 @@ fio_open(fio_location location, const char* path, int mode) if (hdr.arg != 0) { errno = hdr.arg; - fio_fdset &= ~(1 << 
hdr.handle); + unset_handle(hdr.handle); return -1; } - fd = i | FIO_PIPE_MARKER; + fd = handle | FIO_PIPE_MARKER; } else { @@ -641,7 +654,7 @@ fio_close(int fd) .arg = 0, }; - fio_fdset &= ~(1 << hdr.handle); + unset_handle(hdr.handle); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); /* Wait for response */ @@ -3975,23 +3988,18 @@ pioRemoteDrive_pioOpen(VSelf, path_t path, int flags, int permissions, err_i *err) { - int i; + int handle; fio_header hdr; - unsigned long mask; fobj_reset_err(err); fobj_t file; - mask = fio_fdset; - for (i = 0; (mask & 1) != 0; i++, mask >>= 1); - if (i == FIO_FDMAX) - elog(ERROR, "Descriptor pool for remote files is exhausted, " - "probably too many remote files are opened"); + handle = find_free_handle(); hdr.cop = FIO_OPEN; - hdr.handle = i; + hdr.handle = handle; hdr.size = strlen(path) + 1; hdr.arg = flags; - fio_fdset |= 1 << i; + set_handle(handle); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); @@ -4003,10 +4011,10 @@ pioRemoteDrive_pioOpen(VSelf, path_t path, { *err = $syserr((int)hdr.arg, "Cannot open remote file {path:q}", path(path)); - fio_fdset &= ~(1 << hdr.handle); + unset_handle(hdr.handle); return (pioFile_i){NULL}; } - file = $alloc(pioRemoteFile, .handle = i, + file = $alloc(pioRemoteFile, .handle = handle, .p = { .path = ft_cstrdup(path), .flags = flags }); return bind_pioFile(file); } @@ -4398,7 +4406,7 @@ pioRemoteFile_doClose(VSelf) .arg = 0, }; - fio_fdset &= ~(1 << hdr.handle); + unset_handle(hdr.handle); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); /* Wait for response */ From 3e3178e56730394e675a68eec5fb8659b9c53e98 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 07:11:00 +0300 Subject: [PATCH 161/339] ... pioWrite will return error if it writes lesser. 
--- src/data.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/data.c b/src/data.c index fe052d017..71c73c59d 100644 --- a/src/data.c +++ b/src/data.c @@ -337,13 +337,9 @@ write_page(pgFile *file, pioFile_i out, int blknum, Page page) err = $noerr(); rc = $i(pioWrite, out, .buf = ft_bytes(page, BLCKSZ), .err = &err); - if ($haserr(err)) - { ft_log(FT_INFO, $errmsg(err), "write_page"); - } - if (rc != BLCKSZ) - return -1; + ft_assert(rc == BLCKSZ); file->write_size += BLCKSZ; file->uncompressed_size += BLCKSZ; From c516fcb7b918530041115e23df1b860e124af4f6 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 07:10:39 +0300 Subject: [PATCH 162/339] change pioSeek signature --- src/data.c | 12 ++---------- src/utils/file.c | 22 +++++++++------------- src/utils/file.h | 2 +- 3 files changed, 12 insertions(+), 24 deletions(-) diff --git a/src/data.c b/src/data.c index 71c73c59d..b6741bce3 100644 --- a/src/data.c +++ b/src/data.c @@ -321,21 +321,13 @@ write_page(pgFile *file, pioFile_i out, int blknum, Page page) { err_i err = $noerr(); off_t target = blknum * BLCKSZ; - off_t position; size_t rc; - position = $i(pioSeek, out, target, &err); + err = $i(pioSeek, out, target); if ($haserr(err)) - { ft_logerr(FT_ERROR, $errmsg(err), "write_page"); - } - if (position != target) - { - elog(ERROR, "Can't seek to position %ld", target); - } - /* write data page */ - err = $noerr(); + /* write data page */ rc = $i(pioWrite, out, .buf = ft_bytes(page, BLCKSZ), .err = &err); if ($haserr(err)) ft_log(FT_INFO, $errmsg(err), "write_page"); diff --git a/src/utils/file.c b/src/utils/file.c index f74c93c26..416020357 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3935,21 +3935,19 @@ pioLocalFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) return r; } -static off_t -pioLocalFile_pioSeek(VSelf, off_t offs, err_i *err) +static err_i +pioLocalFile_pioSeek(VSelf, off_t offs) { Self(pioLocalFile); - fobj_reset_err(err); ft_assert(self->fd >= 0, 
"Closed file abused \"%s\"", self->p.path); off_t pos = lseek(self->fd, offs, SEEK_SET); if (pos == (off_t)-1) - { - *err = $syserr(errno, "Can not seek to {offs} in file {path:q}", offs(offs), path(self->p.path)); - } - return pos; + return $syserr(errno, "Can not seek to {offs} in file {path:q}", offs(offs), path(self->p.path)); + ft_assert(pos == offs); + return $noerr(); } static err_i @@ -4635,24 +4633,22 @@ pioRemoteFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) return buf.len; } -static off_t -pioRemoteFile_pioSeek(VSelf, off_t offs, err_i *err) +static err_i +pioRemoteFile_pioSeek(VSelf, off_t offs) { Self(pioRemoteFile); fio_header hdr; - fobj_reset_err(err); - ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); hdr.cop = FIO_SEEK; - hdr.handle = self->handle & ~FIO_PIPE_MARKER; + hdr.handle = self->handle; hdr.size = 0; hdr.arg = offs; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - return 0; + return $noerr(); } static err_i diff --git a/src/utils/file.h b/src/utils/file.h index ca99a2120..98cc8b49e 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -238,7 +238,7 @@ fobj_error_cstr_key(gzErrStr); #define mth__pioWrite size_t, (ft_bytes_t, buf), (err_i *, err) #define mth__pioTruncate err_i, (size_t, sz) #define mth__pioWriteFinish err_i -#define mth__pioSeek off_t, (off_t, offs), (err_i *, err) +#define mth__pioSeek err_i, (off_t, offs) fobj_method(pioClose); fobj_method(pioRead); From 156419422c72ada155709967cd30db9a967a8e44 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 08:11:06 +0300 Subject: [PATCH 163/339] expose pioDBDrive more --- src/data.c | 6 +++--- src/utils/file.c | 46 ++++++++++++++++++++++++++++------------------ src/utils/file.h | 3 ++- 3 files changed, 33 insertions(+), 22 deletions(-) diff --git a/src/data.c b/src/data.c index b6741bce3..e097f3236 100644 --- a/src/data.c +++ b/src/data.c @@ -1299,7 +1299,7 @@ bool check_data_file(pgFile *file, const char 
*from_fullpath, uint32 checksum_version) { FOBJ_FUNC_ARP(); - pioDrive_i local_location = pioDriveForLocation(FIO_LOCAL_HOST); + pioDBDrive_i local_location = pioDBDriveForLocation(FIO_LOCAL_HOST); pioPagesIterator_i pages; bool is_valid = true; err_i err; @@ -1766,7 +1766,7 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, { FOBJ_FUNC_ARP(); pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); - pioDrive_i db_location = pioDriveForLocation(FIO_DB_HOST); + pioDBDrive_i db_location = pioDBDriveForLocation(FIO_DB_HOST); pioPagesIterator_i pages; pioFile_i out = $null(pioFile); pioWriteFlush_i wrapped = $null(pioWriteFlush); @@ -1865,7 +1865,7 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, BackupMode backup_mode) { FOBJ_FUNC_ARP(); - pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); + pioDBDrive_i backup_location = pioDBDriveForLocation(FIO_BACKUP_HOST); err_i err = $noerr(); pioPagesIterator_i pages; pioFile_i out; diff --git a/src/utils/file.c b/src/utils/file.c index 416020357..fa3231daa 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -105,10 +105,10 @@ typedef struct __attribute__((packed)) static void dir_list_file(parray *files, const char *root, bool handle_tablespaces, bool follow_symlink, bool backup_logs, bool skip_hidden, - int external_dir_num, pioDrive_i drive); + int external_dir_num, pioDBDrive_i drive); static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, bool handle_tablespaces, bool follow_symlink, bool backup_logs, - bool skip_hidden, int external_dir_num, pioDrive_i drive); + bool skip_hidden, int external_dir_num, pioDBDrive_i drive); void setMyLocation(ProbackupSubcmd const subcmd) @@ -1664,7 +1664,7 @@ fio_receive_pio_err(fio_header *hdr) } static void -fio_iterate_pages_impl(pioDrive_i drive, int out, const char *path, +fio_iterate_pages_impl(pioDBDrive_i drive, int out, const char *path, datapagemap_t 
pagemap, fio_iterate_pages_request *params) { @@ -1673,7 +1673,7 @@ fio_iterate_pages_impl(pioDrive_i drive, int out, const char *path, fio_header hdr = {.cop=FIO_ITERATE_DATA}; BlockNumber finalN; - pages = $(pioIteratePages, drive.self, + pages = $i(pioIteratePages, drive, .path = path, .segno = params->segno, .pagemap = pagemap, @@ -2525,13 +2525,14 @@ fio_remove_dir_impl(int out, char* buf) { */ static void dir_list_file(parray *files, const char *root, bool handle_tablespaces, bool follow_symlink, - bool backup_logs, bool skip_hidden, int external_dir_num, pioDrive_i drive) + bool backup_logs, bool skip_hidden, int external_dir_num, pioDBDrive_i drive) { pgFile *file; Assert(!$i(pioIsRemote, drive)); - file = pgFileNew(root, "", follow_symlink, external_dir_num, drive); + file = pgFileNew(root, "", follow_symlink, external_dir_num, + $reduce(pioDrive, drive)); if (file == NULL) { /* For external directory this is not ok */ @@ -2570,7 +2571,7 @@ dir_list_file(parray *files, const char *root, bool handle_tablespaces, bool fol static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, bool handle_tablespaces, bool follow_symlink, bool backup_logs, - bool skip_hidden, int external_dir_num, pioDrive_i drive) + bool skip_hidden, int external_dir_num, pioDBDrive_i drive) { DIR *dir; struct dirent *dent; @@ -2607,7 +2608,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, join_path_components(rel_child, parent->rel_path, dent->d_name); file = pgFileNew(child, rel_child, follow_symlink, - external_dir_num, drive); + external_dir_num, $reduce(pioDrive, drive)); if (file == NULL) continue; @@ -2716,7 +2717,7 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, * TODO: replace FIO_SEND_FILE and FIO_SEND_FILE_EOF with dedicated messages */ static void -fio_list_dir_impl(int out, char* buf, pioDrive_i drive) +fio_list_dir_impl(int out, char* buf, pioDBDrive_i drive) { int i; fio_header 
hdr; @@ -3031,7 +3032,7 @@ fio_communicate(int in, int out) size_t buf_size = 128*1024; char* buf = (char*)pgut_malloc(buf_size); fio_header hdr; - pioDrive_i drive; + pioDBDrive_i drive; pio_stat_t st; ft_bytes_t bytes; int rc; @@ -3041,7 +3042,7 @@ fio_communicate(int in, int out) FOBJ_FUNC_ARP(); - drive = pioDriveForLocation(FIO_LOCAL_HOST); + drive = pioDBDriveForLocation(FIO_LOCAL_HOST); #ifdef WIN32 SYS_CHECK(setmode(in, _O_BINARY)); @@ -3441,16 +3442,25 @@ typedef struct pioCRC32Counter int64_t size; } pioCRC32Counter; -static pioDrive_i localDrive; -static pioDrive_i remoteDrive; +static pioDBDrive_i localDrive; +static pioDBDrive_i remoteDrive; pioDrive_i pioDriveForLocation(fio_location loc) { if (fio_is_remote(loc)) - return remoteDrive; + return $reduce(pioDrive, remoteDrive); else - return localDrive; + return $reduce(pioDrive, localDrive); +} + +pioDBDrive_i +pioDBDriveForLocation(fio_location loc) +{ + if (fio_is_remote(loc)) + return remoteDrive; + else + return localDrive; } /* Base physical file type */ @@ -3615,7 +3625,7 @@ pioLocalDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_tab FOBJ_FUNC_ARP(); Self(pioLocalDrive); dir_list_file(files, root, handle_tablespaces, follow_symlink, backup_logs, - skip_hidden, external_dir_num, $bind(pioDrive, self)); + skip_hidden, external_dir_num, $bind(pioDBDrive, self)); } static void @@ -6033,6 +6043,6 @@ init_pio_objects(void) { FOBJ_FUNC_ARP(); - localDrive = bindref_pioDrive($alloc(pioLocalDrive)); - remoteDrive = bindref_pioDrive($alloc(pioRemoteDrive)); + localDrive = bindref_pioDBDrive($alloc(pioLocalDrive)); + remoteDrive = bindref_pioDBDrive($alloc(pioRemoteDrive)); } diff --git a/src/utils/file.h b/src/utils/file.h index 98cc8b49e..f767b461d 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -335,6 +335,7 @@ fobj_iface(pioDrive); fobj_iface(pioDBDrive); extern pioDrive_i pioDriveForLocation(fio_location location); +extern pioDBDrive_i pioDBDriveForLocation(fio_location 
location); struct doIteratePages_params { path_t from_fullpath; @@ -351,7 +352,7 @@ struct doIteratePages_params { extern pioPagesIterator_i doIteratePages_impl(pioIteratePages_i drive, struct doIteratePages_params p); #define doIteratePages(drive, ...) \ - doIteratePages_impl($bind(pioIteratePages, drive.self), ((struct doIteratePages_params){ \ + doIteratePages_impl($reduce(pioIteratePages, drive), ((struct doIteratePages_params){ \ .start_lsn = InvalidXLogRecPtr, \ .calg = NONE_COMPRESS, .clevel = 0, \ .just_validate = false, \ From 096a8f9fe04c5b4380ec8bb6bd0bb76273e7513e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 10 Dec 2022 16:26:42 +0300 Subject: [PATCH 164/339] [PBCKP-395] introduce pioLocalDrive_pioOpenRewrite This is "only streaming write" file abstraction with support for atomicity. It uses temporary file and rename on-disk storage. --- src/utils/file.c | 202 +++++++++++++++++++++++++++++++++++++++++++++++ src/utils/file.h | 11 ++- 2 files changed, 211 insertions(+), 2 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index fa3231daa..7e8b6550c 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3368,6 +3368,19 @@ typedef struct pioLocalFile #define kls__pioLocalFile iface__pioFile, iface(pioFile) fobj_klass(pioLocalFile); +typedef struct pioLocalWriteFile +{ + ft_str_t path; + ft_str_t path_tmp; + FILE* fl; + ft_bytes_t buf; + bool use_temp; + bool renamed; +} pioLocalWriteFile; +#define kls__pioLocalWriteFile iface__pioWriteCloser, mth(fobjDispose), \ + iface(pioWriteCloser) +fobj_klass(pioLocalWriteFile); + typedef struct pioRemoteFile { pioFile p; @@ -3522,6 +3535,73 @@ pioLocalDrive_pioOpen(VSelf, path_t path, int flags, return bind_pioFile(file); } +static pioWriteCloser_i +pioLocalDrive_pioOpenRewrite(VSelf, path_t path, int permissions, + bool binary, bool use_temp, err_i *err) +{ + Self(pioLocalDrive); + ft_str_t temppath; + int fd = -1; + FILE* fl; + ft_bytes_t buf; + fobj_t res; + + fobj_reset_err(err); + + if 
(use_temp) + { + temppath = ft_asprintf("%s~tmpXXXXXX", path); + fd = mkstemp(temppath.ptr); + } + else + { + temppath = ft_strdupc(path); + fd = open(path, O_CREAT|O_TRUNC|O_WRONLY, permissions); + } + + if (fd < 0) + { + *err = $syserr(errno, "Create file {path} failed", path(temppath.ptr)); + close(fd); + ft_str_free(&temppath); + return $null(pioWriteCloser); + } + +#ifdef WIN32 + if (binary && _setmode(fd, _O_BINARY) < 0) + { + *err = $syserr(errno, "Changing permissions for {path} failed", + path(temppath.ptr)); + close(fd); + ft_str_free(&temppath); + return $null(pioWriteCloser); + } +#endif + + if (chmod(temppath.ptr, permissions)) + { + *err = $syserr(errno, "Changing permissions for {path} failed", + path(temppath.ptr)); + close(fd); + ft_str_free(&temppath); + return $null(pioWriteCloser); + } + + fl = fdopen(fd, binary ? "wb" : "w"); + ft_assert(fl != NULL); + + buf = ft_bytes_alloc(CHUNK_SIZE); + setvbuf(fl, buf.ptr, _IOFBF, buf.len); + + res = $alloc(pioLocalWriteFile, + .path = ft_strdupc(path), + .path_tmp = temppath, + .use_temp = use_temp, + .fl = fl, + .buf = buf); + return $bind(pioWriteCloser, res); +} + static pio_stat_t pioLocalDrive_pioStat(VSelf, path_t path, bool follow_symlink, err_i *err) { @@ -3989,6 +4069,118 @@ pioLocalFile_fobjRepr(VSelf) (path, $S(self->p.path)), (fd, $I(self->fd))); } +static size_t +pioLocalWriteFile_pioWrite(VSelf, ft_bytes_t buf, err_i* err) +{ + Self(pioLocalWriteFile); + fobj_reset_err(err); + size_t r; + + if (buf.len == 0) + return 0; + + r = fwrite(buf.ptr, 1, buf.len, self->fl); + if (r < buf.len) + *err = $syserr(errno, "Writting file {path:q}", + path(self->path_tmp.ptr)); + return r; +} + +static err_i +pioLocalWriteFile_pioWriteFinish(VSelf) +{ + Self(pioLocalWriteFile); + err_i err = $noerr(); + + if (fflush(self->fl) != 0) + err = $syserr(errno, "Flushing file {path:q}", + path(self->path_tmp.ptr)); + return err; +} + +static err_i +pioLocalWriteFile_pioClose(VSelf, bool sync) +{ + 
Self(pioLocalWriteFile); + int fd; + int r; + + fd = fileno(self->fl); + + if (ferror(self->fl)) + { + fclose(self->fl); + self->fl = NULL; + if (remove(self->path_tmp.ptr)) + return $syserr(errno, "Couldn't remove file {path:q}", + path(self->path_tmp.ptr)); + return $noerr(); + } + + if (fflush(self->fl) != 0) + return $syserr(errno, "Flushing file {path:q}", + path(self->path_tmp.ptr)); + + if (sync) + { + r = fsync(fd); + if (r < 0) + return $syserr(errno, "Cannot fsync file {path:q}", + path(self->path_tmp.ptr)); + } + + if (self->use_temp) + { + if (rename(self->path_tmp.ptr, self->path.ptr)) + return $syserr(errno, "Cannot rename file {old_path:q} to {new_path:q}", + old_path(self->path_tmp.ptr), + new_path(self->path.ptr)); + /* mark as renamed so fobjDispose will not delete it */ + self->renamed = true; + + if (sync) + { + /* + * To guarantee renaming the file is persistent, fsync the file with its + * new name, and its containing directory. + */ + r = fsync(fd); + if (r < 0) + return $syserr(errno, "Cannot fsync file {path:q}", + path(self->path.ptr)); + + if (fsync_parent_path_compat(self->path.ptr) != 0) + return $syserr(errno, "Cannot fsync file {path:q}", + path(self->path.ptr)); + } + } + + if (fclose(self->fl)) + return $syserr(errno, "Cannot close file {path:q}", + path(self->path_tmp.ptr)); + self->fl = NULL; + + return $noerr(); +} + +static void +pioLocalWriteFile_fobjDispose(VSelf) +{ + Self(pioLocalWriteFile); + if (self->fl != NULL) + { + fclose(self->fl); + self->fl = NULL; + } + if (self->use_temp && !self->renamed) + { + remove(self->path_tmp.ptr); + } + ft_str_free(&self->path); + ft_str_free(&self->path_tmp); + ft_bytes_free(&self->buf); +} + /* REMOTE DRIVE */ static pioFile_i @@ -4802,6 +4994,15 @@ pioRemoteFile_fobjRepr(VSelf) (err, self->asyncError.self)); } +static pioWriteCloser_i +pioRemoteDrive_pioOpenRewrite(VSelf, path_t path, int permissions, + bool binary, bool use_temp, err_i *err) +{ + Self(pioRemoteDrive); + *err = 
$err(RT, "NOT IMPLEMENTED"); + return $null(pioWriteCloser); +} + pioRead_i pioWrapReadFilter(pioRead_i fl, pioFilter_i flt, size_t buf_size) { @@ -6028,6 +6229,7 @@ fobj_klass_handle(pioLocalDrive); fobj_klass_handle(pioRemoteDrive); fobj_klass_handle(pioLocalFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioRemoteFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); +fobj_klass_handle(pioLocalWriteFile); fobj_klass_handle(pioWriteFilter, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioReadFilter, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioDevNull); diff --git a/src/utils/file.h b/src/utils/file.h index f767b461d..dbcc3624b 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -280,6 +280,11 @@ fobj_iface(pioPagesIterator); #define mth__pioOpen pioFile_i, (path_t, path), (int, flags), \ (int, permissions), (err_i *, err) #define mth__pioOpen__optional() (permissions, FILE_PERMISSION) +#define mth__pioOpenRewrite pioWriteCloser_i, (path_t, path), (int, permissions), \ + (bool, binary), (bool, use_temp), \ + (err_i *, err) +#define mth__pioOpenRewrite__optional() (binary, true), (use_temp, true), \ + (permissions, FILE_PERMISSION) #define mth__pioStat pio_stat_t, (path_t, path), (bool, follow_symlink), \ (err_i *, err) #define mth__pioRemove err_i, (path_t, path), (bool, missing_ok) @@ -311,6 +316,7 @@ fobj_iface(pioPagesIterator); (uint32, checksum_version), (bool, just_validate), (err_i*, err) fobj_method(pioOpen); +fobj_method(pioOpenRewrite); fobj_method(pioStat); fobj_method(pioRemove); fobj_method(pioRename); @@ -326,9 +332,10 @@ fobj_method(pioWriteFile); fobj_method(pioIteratePages); #define iface__pioDrive mth(pioOpen, pioStat, pioRemove, pioRename), \ - mth(pioExists, pioGetCRC32, pioIsRemote), \ + mth(pioExists, pioGetCRC32, pioIsRemote), \ mth(pioMakeDir, pioListDir, pioRemoveDir), \ - mth(pioFilesAreSame, pioReadFile, pioWriteFile) + mth(pioFilesAreSame, pioReadFile, pioWriteFile), \ + mth(pioOpenRewrite) 
fobj_iface(pioDrive); #define iface__pioDBDrive iface__pioDrive, mth(pioIteratePages) From 19fcad262e9cabbc387789c94178053b56a0297c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 06:32:19 +0300 Subject: [PATCH 165/339] [PBCKP-395] implement remote pioOpenRewrite pioRemoteWriteFile uses async api exclusively. --- src/utils/file.c | 245 ++++++++++++++++++++++++++++++++++++++++++++++- src/utils/file.h | 5 + 2 files changed, 248 insertions(+), 2 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 7e8b6550c..d5d60744f 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -95,6 +95,12 @@ typedef struct __attribute__((packed)) int just_validate; } fio_iterate_pages_request; +struct __attribute__((packed)) fio_req_open_rewrite { + uint32_t permissions; + bool binary; + bool use_temp; +}; + /* Convert FIO pseudo handle to index in file descriptor array */ #define fio_fileno(f) (((size_t)f - 1) | FIO_PIPE_MARKER) @@ -3028,6 +3034,10 @@ fio_communicate(int in, int out) */ int fd[FIO_FDMAX]; DIR* dir[FIO_FDMAX]; + + fobj_t objs[FIO_FDMAX] = {0}; + err_i async_errs[FIO_FDMAX] = {0}; + struct dirent* entry; size_t buf_size = 128*1024; char* buf = (char*)pgut_malloc(buf_size); @@ -3326,6 +3336,92 @@ fio_communicate(int in, int out) fio_iterate_pages_impl(drive, out, from_fullpath, pagemap, params); } break; + case PIO_OPEN_REWRITE: + { + struct fio_req_open_rewrite *req = (void*)buf; + const char *path = buf + sizeof(*req); + pioWriteCloser_i fl; + err_i err; + + ft_assert(hdr.handle >= 0); + ft_assert(objs[hdr.handle] == NULL); + + fl = $i(pioOpenRewrite, drive, .path = path, + .permissions = req->permissions, + .binary = req->binary, + .use_temp = req->use_temp, + .err = &err); + if ($haserr(err)) + fio_send_pio_err(out, err); + else + { + hdr.size = 0; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + objs[hdr.handle] = $ref(fl.self); + } + break; + } + case PIO_WRITE_ASYNC: + { + err_i err; + + ft_assert(hdr.handle >= 0); 
+ ft_assert(objs[hdr.handle] != NULL); + + $(pioWrite, objs[hdr.handle], ft_bytes(buf, hdr.size), + .err = &err); + if ($haserr(err)) + $iset(&async_errs[hdr.handle], err); + break; + } + case PIO_GET_ASYNC_ERROR: + { + ft_assert(hdr.handle >= 0); + ft_assert(objs[hdr.handle] != NULL); + ft_assert(hdr.size == 0); + + if ($haserr(async_errs[hdr.handle])) + { + fio_send_pio_err(out, async_errs[hdr.handle]); + $idel(&async_errs[hdr.handle]); + } + else + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + break; + } + case PIO_CLOSE: + { + err_i err; + + ft_assert(hdr.handle >= 0); + ft_assert(objs[hdr.handle] != NULL); + ft_assert(hdr.size == 1); + + err = $(pioClose, objs[hdr.handle], .sync = buf[0]); + err = fobj_err_combine(err, async_errs[hdr.handle]); + if ($haserr(err)) + { + fio_send_pio_err(out, err); + } + else + { + hdr.size = 0; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + } + $del(&objs[hdr.handle]); + $idel(&async_errs[hdr.handle]); + break; + } + case PIO_DISPOSE: + { + ft_assert(hdr.handle >= 0); + ft_assert(objs[hdr.handle] != NULL); + ft_assert(hdr.size == 0); + + $del(&objs[hdr.handle]); + $idel(&async_errs[hdr.handle]); + break; + } default: Assert(false); } @@ -3397,6 +3493,14 @@ typedef struct pioRemoteFile mth(pioSetAsync, pioAsyncRead, pioAsyncWrite, pioAsyncError) fobj_klass(pioRemoteFile); +typedef struct pioRemoteWriteFile { + ft_str_t path; + int handle; +} pioRemoteWriteFile; +#define kls__pioRemoteWriteFile iface__pioWriteCloser, mth(fobjDispose), \ + iface(pioWriteCloser) +fobj_klass(pioRemoteWriteFile); + typedef struct pioReadFilter { pioRead_i wrapped; pioFilter_i filter; @@ -4999,8 +5103,144 @@ pioRemoteDrive_pioOpenRewrite(VSelf, path_t path, int permissions, bool binary, bool use_temp, err_i *err) { Self(pioRemoteDrive); - *err = $err(RT, "NOT IMPLEMENTED"); - return $null(pioWriteCloser); + ft_strbuf_t buf = ft_strbuf_zero(); + fobj_t fl; + int handle = find_free_handle(); + + fio_header hdr = { + 
.cop = PIO_OPEN_REWRITE, + .handle = handle, + }; + + struct fio_req_open_rewrite req = { + .permissions = permissions, + .binary = binary, + .use_temp = use_temp + }; + + fio_ensure_remote(); + + ft_strbuf_catbytes(&buf, ft_bytes(&hdr, sizeof(hdr))); + ft_strbuf_catbytes(&buf, ft_bytes(&req, sizeof(req))); + ft_strbuf_catc(&buf, path); + ft_strbuf_cat1(&buf, '\0'); + + ((fio_header*)buf.ptr)->size = buf.len - sizeof(hdr); + + IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len), buf.len); + + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + + if (hdr.cop == FIO_PIO_ERROR) + { + *err = fio_receive_pio_err(&hdr); + return $null(pioWriteCloser); + } + ft_dbg_assert(hdr.cop == PIO_OPEN_REWRITE && + hdr.handle == handle); + + set_handle(handle); + + fl = $alloc(pioRemoteWriteFile, + .path = ft_strdupc(path), + .handle = handle); + return $bind(pioWriteCloser, fl); +} + +static size_t +pioRemoteWriteFile_pioWrite(VSelf, ft_bytes_t buf, err_i* err) +{ + Self(pioRemoteWriteFile); + fobj_reset_err(err); + fio_header hdr; + + ft_assert(self->handle >= 0); + + if (buf.len == 0) + return 0; + + hdr = (fio_header){ + .cop = PIO_WRITE_ASYNC, + .handle = self->handle, + .size = buf.len, + .arg = 0, + }; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len), buf.len); + + return buf.len; +} + +static err_i +pioRemoteWriteFile_pioWriteFinish(VSelf) +{ + Self(pioRemoteWriteFile); + fio_header hdr; + + ft_assert(self->handle >= 0); + + hdr = (fio_header){ + .cop = PIO_GET_ASYNC_ERROR, + .handle = self->handle, + }; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + + if (hdr.cop == FIO_PIO_ERROR) + return fio_receive_pio_err(&hdr); + ft_dbg_assert(hdr.cop == PIO_GET_ASYNC_ERROR); + + return $noerr(); +} + +static err_i +pioRemoteWriteFile_pioClose(VSelf, bool sync) +{ + Self(pioRemoteWriteFile); + 
fio_header hdr; + struct __attribute__((packed)) { + fio_header hdr; + bool sync; + } req = { + .hdr = { + .cop = PIO_CLOSE, + .handle = self->handle, + .size = 1, + }, + .sync = sync, + }; + + ft_assert(self->handle >= 0); + + IO_CHECK(fio_write_all(fio_stdout, &req, sizeof(req)), sizeof(req)); + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + + unset_handle(self->handle); + self->handle = -1; + + if (hdr.cop == FIO_PIO_ERROR) + return fio_receive_pio_err(&hdr); + return $noerr(); +} + +static void +pioRemoteWriteFile_fobjDispose(VSelf) +{ + Self(pioRemoteWriteFile); + + if (self->handle >= 0) + { + fio_header hdr = { + .cop = PIO_DISPOSE, + .handle = self->handle, + }; + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + unset_handle(self->handle); + } + ft_str_free(&self->path); } pioRead_i @@ -6230,6 +6470,7 @@ fobj_klass_handle(pioRemoteDrive); fobj_klass_handle(pioLocalFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioRemoteFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioLocalWriteFile); +fobj_klass_handle(pioRemoteWriteFile); fobj_klass_handle(pioWriteFilter, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioReadFilter, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioDevNull); diff --git a/src/utils/file.h b/src/utils/file.h index dbcc3624b..71710de19 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -75,6 +75,11 @@ typedef enum FIO_ITERATE_PAGES, FIO_ITERATE_DATA, FIO_ITERATE_EOF, + PIO_OPEN_REWRITE, + PIO_WRITE_ASYNC, + PIO_GET_ASYNC_ERROR, + PIO_CLOSE, + PIO_DISPOSE, } fio_operations; typedef struct From 5021a996575fda251fa436db49fa48850e815145 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Dec 2022 16:22:41 +0300 Subject: [PATCH 166/339] [PBCKP-395] use pioOpenWrite in send_pages --- src/data.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/data.c b/src/data.c index e097f3236..4e3ef01df 100644 --- a/src/data.c +++ 
b/src/data.c @@ -1768,7 +1768,7 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); pioDBDrive_i db_location = pioDBDriveForLocation(FIO_DB_HOST); pioPagesIterator_i pages; - pioFile_i out = $null(pioFile); + pioWriteCloser_i out = $null(pioWriteCloser); pioWriteFlush_i wrapped = $null(pioWriteFlush); pioCRC32Counter *crc32 = NULL; ft_arr_bpph2_t harray = ft_arr_init(); @@ -1793,7 +1793,8 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, if (value.page_result == PageIsOk) { if($isNULL(out)) { - out = $i(pioOpen, backup_location, to_fullpath, PG_BINARY|O_CREAT|O_RDWR, 0, &err); + out = $i(pioOpenRewrite, backup_location, to_fullpath, + .use_temp = false, .err = &err); if ($haserr(err)) return $iresult(err); crc32 = pioCRC32Counter_alloc(); From 85dc5c09a0eef9e62f5b8618869e76b46be49ace Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Dec 2022 16:46:47 +0300 Subject: [PATCH 167/339] [PBCKP-395] use pioOpenRewrite in write_backup_filelist --- src/catalog.c | 44 +++++++++++++++----------------------------- 1 file changed, 15 insertions(+), 29 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index a8f371065..c57be6852 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2466,31 +2466,32 @@ void write_backup_filelist(pgBackup *backup, parray *files, const char *root, parray *external_list, bool sync) { + FOBJ_FUNC_ARP(); char control_path[MAXPGPATH]; - char control_path_temp[MAXPGPATH]; size_t i = 0; int64 backup_size_on_disk = 0; int64 uncompressed_size_on_disk = 0; int64 wal_size_on_disk = 0; - pioFile_i out; + pioWriteCloser_i out; + pioCRC32Counter* crc; + pioWriteFlush_i wrapped; pioDrive_i backup_drive = backup->backup_location; err_i err; ft_strbuf_t line = ft_strbuf_zero(); join_path_components(control_path, backup->root_dir, DATABASE_FILE_LIST); - snprintf(control_path_temp, sizeof(control_path_temp), "%s.tmp", control_path); 
- out = $i(pioOpen, backup_drive, control_path_temp, - .flags = O_WRONLY | O_CREAT | O_EXCL | PG_BINARY, - .err = &err); + out = $i(pioOpenRewrite, backup_drive, control_path, .err = &err); if ($haserr(err)) - elog(ERROR, "Cannot open file list \"%s\": %s", control_path_temp, + elog(ERROR, "Cannot open file list \"%s\": %s", control_path, strerror(errno)); - if (sync) - INIT_CRC32C(backup->content_crc); + crc = pioCRC32Counter_alloc(); + wrapped = pioWrapWriteFilter($reduce(pioWriteFlush, out), + $bind(pioFilter, crc), + OUT_BUF_SIZE); /* print each file in the list */ for (i = 0; i < parray_num(files); i++) @@ -2562,10 +2563,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, ft_strbuf_catf(&line, "}\n"); - if (sync) - COMP_CRC32C(backup->content_crc, (char*)line.ptr, line.len); - - $i(pioWrite, out, ft_bytes(line.ptr, line.len), &err); + $i(pioWrite, wrapped, ft_bytes(line.ptr, line.len), &err); ft_strbuf_reset_for_reuse(&line); @@ -2575,29 +2573,17 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, ft_strbuf_free(&line); - if (sync) - FIN_CRC32C(backup->content_crc); - - err = $i(pioWriteFinish, out); + err = $i(pioWriteFinish, wrapped); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Flushing " DATABASE_FILE_LIST ".tmp"); - /* if (sync) - { - err = pioSync(out); - if ($haserr(err)) - ft_logerr(FT_FATAL, $errmsg(err), "Sync " DATABASE_FILE_LIST ".tmp"); - } */ + if (sync) + backup->content_crc = pioCRC32Counter_getCRC32(crc); - err = $i(pioClose, out, .sync=true); + err = $i(pioClose, out, .sync=sync); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Closing " DATABASE_FILE_LIST ".tmp"); - err = $i(pioRename, backup->backup_location, - .old_path = control_path_temp, .new_path = control_path); - if ($haserr(err)) - ft_logerr(FT_FATAL, $errmsg(err), "Renaming " DATABASE_FILE_LIST ".tmp"); - /* use extra variable to avoid reset of previous data_bytes value in case of error */ backup->data_bytes = 
backup_size_on_disk; backup->uncompressed_bytes = uncompressed_size_on_disk; From ac4e509a8218d0693374f54d4ea8efff2e34e09e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Dec 2022 16:55:18 +0300 Subject: [PATCH 168/339] [PBCKP-395] use pioOpenRewrite in pioLocalDrive_pioWriteFile --- src/utils/file.c | 101 ++++++----------------------------------------- 1 file changed, 12 insertions(+), 89 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index d5d60744f..caad72e72 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3939,9 +3939,7 @@ pioLocalDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) FOBJ_FUNC_ARP(); Self(pioLocalDrive); err_i err; - ft_str_t temppath = ft_str(NULL, 0); - int fd = -1; - ssize_t r; + pioWriteCloser_i fl; fobj_reset_err(&err); @@ -3952,95 +3950,20 @@ pioLocalDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) return $iresult(err); } - if (content.len == 0) - { - /* just create file */ - fd = creat(path, FILE_PERMISSION); - if (fd < 0) - { - err = $syserr(errno, "Create file for {path} failed", path(path)); - return $iresult(err); - } - if (fsync(fd) < 0) - { - err = $syserr(errno, "Closing file {path} failed", path(path)); - close(fd); - return $iresult(err); - } - if (close(fd) < 0) - { - err = $syserr(errno, "Closing file {path} failed", path(path)); - return $iresult(err); - } - return $noerr(); - } - - /* make temporary name */ - temppath = ft_asprintf("%s~tmpXXXXXX", path); - fd = mkstemp(temppath.ptr); - if (fd < 0) - { - err = $syserr(errno, "Create temp file for {path} failed", path(path)); - goto error; - } - -#if PG_BINARY - if (binary && _setmode(fd, PG_BINARY) < 0) - { - err = $syserr(errno, "Set file mode for {path} failed", path(temppath.ptr)); - goto error; - } -#endif - - r = durable_write(fd, content.ptr, content.len); - if (r < 0) - { - err = $syserr(errno, "Cannot write to file {path:q}", - path(temppath.ptr)); - goto error; - } - - if (r < content.len) - { 
- err = $err(SysErr, "Short write on {path:q}: {writtenSz} < {wantedSz}", - path(temppath.ptr), writtenSz(r), wantedSz(content.len), - errNo(EIO)); - goto error; - } - - if (fsync(fd) < 0) - { - err = $syserr(errno, "Cannot fsync file {path:q}", - path(temppath.ptr)); - goto error; - } - - if (close(fd) < 0) - { - err = $syserr(errno, "Cannot close file {path:q}", - path(temppath.ptr)); - goto error; - } - fd = -1; + fl = $(pioOpenRewrite, self, path, + .binary = binary, .err = &err); + if ($haserr(err)) + return $iresult(err); - if (rename(temppath.ptr, path) < 0) - { - err = $syserr(errno, "Cannot close file {path:q}", - path(temppath.ptr)); - goto error; - } + $i(pioWrite, fl, content, .err = &err); + if ($haserr(err)) + return $iresult(err); - ft_str_free(&temppath); - return $noerr(); + err = $i(pioWriteFinish, fl); + if ($haserr(err)) + return $iresult(err); -error: - if (fd >= 0) - close(fd); - if (temppath.len > 0) - { - remove(temppath.ptr); - ft_str_free(&temppath); - } + err = $i(pioClose, fl); return $iresult(err); } From 644d5f72c8a4c7aee901eaee1ad6c2acbab63a35 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 13 Dec 2022 17:59:49 +0300 Subject: [PATCH 169/339] [PBCKP-395] use pioOpenRewrite in archive.c: push_file_internal As a unfortunate side effect it leads to absence of concurrent archiving detection. This way "last-win" happens. S3 gives no way to solve the issue (and this change is preparatino for S3). But we could use file-locking if storage is on-disk. So push_file_internal should be re-improved to use it. test_archive_push_part_file_exists_not_stale is disabled and should be rewritten to accompany rewritten push_file_internal with file-locking. test_archive_push_partial_file_exists is removed completely. 
--- src/archive.c | 185 ++++++++---------------------------------- src/pg_probackup.h | 8 +- tests/archive_test.py | 81 ++---------------- 3 files changed, 45 insertions(+), 229 deletions(-) diff --git a/src/archive.c b/src/archive.c index 3abecfbd0..7f4ecf12f 100644 --- a/src/archive.c +++ b/src/archive.c @@ -13,12 +13,12 @@ #include "utils/thread.h" #include "portability/instr_time.h" -static int push_file_internal(const char *wal_file_name, +static err_i push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, const char *archive_dir, bool overwrite, bool no_sync, bool is_compress, int compress_level, - uint32 archive_timeout); + uint32 archive_timeout, bool *skipped); static void *push_files(void *arg); static void *get_files(void *arg); static bool get_wal_file(const char *filename, const char *from_path, const char *to_path, @@ -324,14 +324,19 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, bool no_ready_rename, bool is_compress, int compress_level) { - int rc; + bool skipped = false; + err_i err; elog(LOG, "pushing file \"%s\"", xlogfile->name); - rc = push_file_internal(xlogfile->name, pg_xlog_dir, + err = push_file_internal(xlogfile->name, pg_xlog_dir, archive_dir, overwrite, no_sync, is_compress, compress_level, - archive_timeout); + archive_timeout, &skipped); + if ($haserr(err)) + { + ft_logerr(FT_ERROR, $errmsg(err), "Archiving %s", xlogfile->name); + } /* take '--no-ready-rename' flag into account */ if (!no_ready_rename && archive_status_dir != NULL) @@ -355,15 +360,7 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, wal_file_ready, wal_file_done, strerror(errno)); } - return rc; -} - -static void -remove_temp_wal_file(pioDrive_i backup_drive, char *partpath) -{ - err_i remerr = $i(pioRemove, backup_drive, partpath, false); - if ($haserr(remerr)) - elog(WARNING, "Temp WAL: %s", $errmsg(remerr)); + return skipped; } /* @@ -374,24 +371,18 @@ remove_temp_wal_file(pioDrive_i backup_drive, char *partpath) * 1 
- push was skipped because file already exists in the archive and * has the same checksum */ -int +err_i push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, const char *archive_dir, bool overwrite, bool no_sync, bool is_compress, int compress_level, - uint32 archive_timeout) + uint32 archive_timeout, bool *skipped) { FOBJ_FUNC_ARP(); pioFile_i in; - pioFile_i out; - char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */ + pioWriteCloser_i out; char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; - char to_fullpath_part[MAXPGPATH]; /* partial handling */ - pio_stat_t st; - int partial_try_count = 0; - int64_t partial_file_size = 0; - bool partial_is_stale = true; size_t len; err_i err = $noerr(); @@ -409,100 +400,10 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, /* destination file with .gz suffix */ len = ft_strlcat(to_fullpath, ".gz", sizeof(to_fullpath)); if (len >= sizeof(to_fullpath)) - elog(ERROR, "File path too long: \"%s\"", to_fullpath); + return $iresult($err(RT, "File path too long: {path:q}", + path(to_fullpath))); } /* open destination partial file for write */ - len = snprintf(to_fullpath_part, sizeof(to_fullpath_part), "%s.part", - to_fullpath); - if (len >= sizeof(to_fullpath)) - elog(ERROR, "File path too long: \"%s\"", to_fullpath); - - /* Open source file for read */ - in = $i(pioOpen, db_drive, from_fullpath, O_RDONLY | PG_BINARY, .err = &err); - if ($haserr(err)) - elog(ERROR, "Source file: %s", $errmsg(err)); - - retry_open: - out = $i(pioOpen, backup_drive, to_fullpath_part, - .flags = O_WRONLY | O_CREAT | O_EXCL | PG_BINARY, - .err = &err); - if ($noerr(err)) - goto part_opened; - - if (getErrno(err) != EEXIST) - /* Already existing destination temp file is not an error condition */ - elog(ERROR, "Temp WAL file: %s", $errmsg(err)); - - /* - * Partial file already exists, it could have happened due to: - * 1. failed archive-push - * 2. 
concurrent archiving - * - * For ARCHIVE_TIMEOUT period we will try to create partial file - * and look for the size of already existing partial file, to - * determine if it is changing or not. - * If after ARCHIVE_TIMEOUT we still failed to create partial - * file, we will make a decision about discarding - * already existing partial file. - */ - - while (partial_try_count < archive_timeout) - { - FOBJ_LOOP_ARP(); - st = $i(pioStat, backup_drive, .path = to_fullpath_part, - .follow_symlink = false, .err = &err); - if ($haserr(err)) - { - if (getErrno(err) == ENOENT) - //part file is gone, lets try to grab it - goto retry_open; - else - elog(ERROR, "Temp WAL: %s", $errmsg(err)); - } - - /* first round */ - if (!partial_try_count) - { - elog(LOG, - "Temp WAL file already exists, waiting on it %u seconds: \"%s\"", - archive_timeout, to_fullpath_part); - partial_file_size = st.pst_size; - } - - /* file size is changing */ - if (st.pst_size != partial_file_size) - partial_is_stale = false; - - sleep(1); - partial_try_count++; - } - /* The possible exit conditions: - * 1. File is not grabbed, and it is not stale - * 2. File is not grabbed, and it is stale. - */ - - /* - * If temp file was not grabbed for ARCHIVE_TIMEOUT and temp file is not stale, - * then exit with error. 
- */ - if (!partial_is_stale) - elog(ERROR, "Failed to open temp WAL file \"%s\" in %i seconds", - to_fullpath_part, archive_timeout); - - /* Partial segment is considered stale, so reuse it */ - elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_part); - err = $i(pioRemove, backup_drive, .path = to_fullpath_part, .missing_ok = false); - if ($haserr(err)) - elog(ERROR, "Temp WAL: %s", $errmsg(err)); - - out = $i(pioOpen, backup_drive, .path = to_fullpath_part, - .flags = O_WRONLY | O_CREAT | O_EXCL | PG_BINARY, - .err = &err); - if ($haserr(err)) - elog(ERROR, "Temp WAL: %s", $errmsg(err)); - - part_opened: - elog(LOG, "Temp WAL file successfully created: \"%s\"", to_fullpath_part); if ($i(pioExists, backup_drive, .path = to_fullpath, .err = &err)) { @@ -512,23 +413,19 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, crc32_src = $i(pioGetCRC32, db_drive, from_fullpath, .compressed = false, .err = &err); if ($haserr(err)) - elog(ERROR, "Cannot count crc32 for source file \"%s\": %s", - from_fullpath, $errmsg(err)); + return $iresult(err); crc32_dst = $i(pioGetCRC32, backup_drive, to_fullpath, .compressed = is_compress, .err = &err); if ($haserr(err)) - elog(ERROR, "Cannot count crc32 for destination file \"%s\": %s", - to_fullpath, $errmsg(err)); + return $iresult(err); if (crc32_src == crc32_dst) { elog(LOG, "WAL file already exists in archive with the same " "checksum, skip pushing: \"%s\"", from_fullpath); - $i(pioClose, in); - $i(pioClose, out); - remove_temp_wal_file(backup_drive, to_fullpath_part); - return 1; + *skipped = true; + return $noerr(); } else if (overwrite) { @@ -538,19 +435,25 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, } else { - $i(pioClose, in); - $i(pioClose, out); - remove_temp_wal_file(backup_drive, to_fullpath_part); - - elog(ERROR, "WAL file already exists in archive with " - "different checksum: \"%s\"", to_fullpath); + return $iresult($err(RT, "WAL file already exists in 
archive with " + "different checksum: {path:q}", + path(to_fullpath))); } } else if ($haserr(err)) { - elog(ERROR, "%s", $errmsg(err)); + return $iresult(err); } + /* Open source file for read */ + in = $i(pioOpen, db_drive, from_fullpath, O_RDONLY | PG_BINARY, .err = &err); + if ($haserr(err)) + return $iresult(err); + + out = $i(pioOpenRewrite, backup_drive, .path = to_fullpath, .err = &err); + if ($haserr(err)) + return $iresult(err); + /* enable streaming compression */ if (is_compress) { @@ -573,29 +476,13 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, $i(pioClose, in); /* ignore error */ if ($haserr(err)) - { - $i(pioClose, out); - remove_temp_wal_file(backup_drive, to_fullpath_part); - elog(ERROR, "Copy WAL: %s", $errmsg(err)); - } + return $iresult(err); err = $i(pioClose, out, .sync = !no_sync); if ($haserr(err)) - { - remove_temp_wal_file(backup_drive, to_fullpath_part); - elog(ERROR, "Temp WAL: %s", $errmsg(err)); - } + return $iresult(err); - /* Rename temp file to destination file */ - err = $i(pioRename, backup_drive, to_fullpath_part, to_fullpath); - if ($haserr(err)) - { - remove_temp_wal_file(backup_drive, to_fullpath_part); - elog(ERROR, "%s", $errmsg(err)); - } - - free(buf); - return 0; + return $noerr(); } /* Copy file attributes */ diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 9a92d61d0..984cae95b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -693,14 +693,14 @@ typedef struct StopBackupCallbackParams strcmp((fname) + XLOG_FNAME_LEN, ".gz.partial") == 0) #define IsTempXLogFileName(fname) \ - (strlen(fname) == XLOG_FNAME_LEN + strlen(".part") && \ + (strlen(fname) == XLOG_FNAME_LEN + strlen("~tmp") + 6 && \ strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ - strcmp((fname) + XLOG_FNAME_LEN, ".part") == 0) + strncmp((fname) + XLOG_FNAME_LEN, "~tmp", 4) == 0) #define IsTempCompressXLogFileName(fname) \ - (strlen(fname) == XLOG_FNAME_LEN + strlen(".gz.part") && \ + (strlen(fname) == 
XLOG_FNAME_LEN + strlen(".gz~tmp") + 6 && \ strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ - strcmp((fname) + XLOG_FNAME_LEN, ".gz.part") == 0) + strncmp((fname) + XLOG_FNAME_LEN, ".gz~tmp", 7) == 0) #define IsSshProtocol() (instance_config.remote.host && strcmp(instance_config.remote.proto, "ssh") == 0) diff --git a/tests/archive_test.py b/tests/archive_test.py index 5f5f6566c..af7bef721 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -491,82 +491,11 @@ def test_archive_push_file_exists_overwrite(self): 'WAL file already exists in archive with ' 'different checksum, overwriting', log_content) - # @unittest.skip("skip") - def test_archive_push_partial_file_exists(self): - """Archive-push if stale '.part' file exists""" - backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(self.module_name, self.fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) - - self.init_pb(backup_dir) - self.add_instance(backup_dir, 'node', node) - self.set_archiving( - backup_dir, 'node', node, - log_level='verbose', archive_timeout=60) - - node.slow_start() - - # this backup is needed only for validation to xid - self.backup_node(backup_dir, 'node', node) - - node.safe_psql( - "postgres", - "create table t1(a int)") - - xid = node.safe_psql( - "postgres", - "INSERT INTO t1 VALUES (1) RETURNING (xmin)").decode('utf-8').rstrip() - - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() - - filename_orig = filename_orig.decode('utf-8') - - # form up path to next .part WAL segment - wals_dir = os.path.join(backup_dir, 'wal', 'node') - if self.archive_compress: - filename = filename_orig + '.gz' + '.part' - file = os.path.join(wals_dir, filename) - else: - filename = filename_orig + '.part' - file = os.path.join(wals_dir, filename) - - # emulate stale .part file - with 
open(file, 'a+b') as f: - f.write(b"blahblah") - f.flush() - f.close() - - self.switch_wal_segment(node) - sleep(70) - - # check that segment is archived - if self.archive_compress: - filename_orig = filename_orig + '.gz' - - file = os.path.join(wals_dir, filename_orig) - self.assertTrue(os.path.isfile(file)) - - # successful validate means that archive-push reused stale wal segment - self.validate_pb( - backup_dir, 'node', - options=['--recovery-target-xid={0}'.format(xid)]) - - log_file = os.path.join(node.logs_dir, 'postgresql.log') - with open(log_file, 'r') as f: - log_content = f.read() - - self.assertIn( - 'Reusing stale temp WAL file', - log_content) - - # @unittest.skip("skip") + @unittest.skip("should be redone with file locking") def test_archive_push_part_file_exists_not_stale(self): """Archive-push if .part file exists and it is not stale""" + # TODO: this test is not completely obsolete, but should be rewritten + # with use of file locking when push_file_internal will use it. 
backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -2412,7 +2341,7 @@ def test_archive_show_partial_files_handling(self): os.rename( os.path.join(wals_dir, filename), - os.path.join(wals_dir, '{0}.part'.format(filename))) + os.path.join(wals_dir, '{0}~tmp123451'.format(filename))) # .gz.part file node.safe_psql( @@ -2430,7 +2359,7 @@ def test_archive_show_partial_files_handling(self): os.rename( os.path.join(wals_dir, filename), - os.path.join(wals_dir, '{0}.gz.part'.format(filename))) + os.path.join(wals_dir, '{0}.gz~tmp234513'.format(filename))) # .partial file node.safe_psql( From 5e82af7f3a2aaccfd8b8ccd3bf4925f943b4af8c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 06:42:59 +0300 Subject: [PATCH 170/339] [PBCKP-395] use pioOpenRewrite in create_empty_file --- src/data.c | 34 ++++++++++++++++++++-------------- src/pg_probackup.h | 3 +-- src/restore.c | 3 +-- 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/src/data.c b/src/data.c index 4e3ef01df..ba8c3f6ea 100644 --- a/src/data.c +++ b/src/data.c @@ -1206,27 +1206,33 @@ backup_non_data_file_internal(const char *from_fullpath, * Create empty file, used for partial restore */ bool -create_empty_file(fio_location from_location, const char *to_root, - fio_location to_location, pgFile *file) +create_empty_file(const char *to_root, fio_location to_location, pgFile *file) { + FOBJ_FUNC_ARP(); char to_path[MAXPGPATH]; - FILE *out; + pioDrive_i drive = pioDriveForLocation(to_location); + pioWriteCloser_i fl; + err_i err; /* open file for write */ join_path_components(to_path, to_root, file->rel_path); - out = fio_fopen(to_location, to_path, PG_BINARY_W); - - if (out == NULL) - elog(ERROR, "Cannot open destination file \"%s\": %s", - to_path, strerror(errno)); + /* + * TODO: possibly it is better to use pioWriteFile, but it doesn't have + * permissions parameter, 
and I don't want to introduce is just for one + * use case + */ + fl = $i(pioOpenRewrite, drive, + .permissions = file->mode, + .use_temp = false, + .err = &err); + if ($haserr(err)) + ft_logerr(FT_ERROR, $errmsg(err), "Creating empty file"); - /* update file permission */ - if (fio_chmod(to_location, to_path, file->mode) == -1) - elog(ERROR, "Cannot change mode of \"%s\": %s", to_path, - strerror(errno)); + err = $i(pioWriteFinish, fl); + err = fobj_err_combine(err, $i(pioClose, fl, .sync=false)); - if (fio_fclose(out)) - elog(ERROR, "Cannot close \"%s\": %s", to_path, strerror(errno)); + if ($haserr(err)) + ft_logerr(FT_ERROR, $errmsg(err), "Closing empty file"); return true; } diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 984cae95b..209eb5511 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1054,8 +1054,7 @@ extern size_t restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, bool already_exists); extern void restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file, const char *from_fullpath, const char *to_fullpath); -extern bool create_empty_file(fio_location from_location, const char *to_root, - fio_location to_location, pgFile *file); +extern bool create_empty_file(const char *to_root, fio_location to_location, pgFile *file); extern PageState *get_checksum_map(const char *fullpath, uint32 checksum_version, int n_blocks, XLogRecPtr dest_stop_lsn, BlockNumber segmentno); diff --git a/src/restore.c b/src/restore.c index 25d227c2e..c098e7ea9 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1158,8 +1158,7 @@ restore_files(void *arg) * We cannot simply skip the file, because it may lead to * failure during WAL redo; hence, create empty file. 
*/ - create_empty_file(FIO_BACKUP_HOST, - arguments->to_root, FIO_DB_HOST, dest_file); + create_empty_file(arguments->to_root, FIO_DB_HOST, dest_file); elog(LOG, "Skip file due to partial restore: \"%s\"", dest_file->rel_path); From 9e1f00de79c8ad1415c5590bbf39261de407b082 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 08:15:53 +0300 Subject: [PATCH 171/339] [PBCKP-395] add pioDBDrive.pioOpenWrite for opening file with pioSeek pioSeek is used in restorations, therefore we have to have it. But it is not usable for S3, there it is not in pioDrive. --- src/utils/file.c | 186 ++++++++++++++++++++++++++++++++++++++++++++++- src/utils/file.h | 10 ++- 2 files changed, 191 insertions(+), 5 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index caad72e72..1ee7d84d4 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -101,6 +101,11 @@ struct __attribute__((packed)) fio_req_open_rewrite { bool use_temp; }; +struct __attribute__((packed)) fio_req_open_write { + uint32_t permissions; + bool exclusive; +}; + /* Convert FIO pseudo handle to index in file descriptor array */ #define fio_fileno(f) (((size_t)f - 1) | FIO_PIPE_MARKER) @@ -3361,6 +3366,30 @@ fio_communicate(int in, int out) } break; } + case PIO_OPEN_WRITE: + { + struct fio_req_open_write *req = (void*)buf; + const char *path = buf + sizeof(*req); + pioDBWriter_i fl; + err_i err; + + ft_assert(hdr.handle >= 0); + ft_assert(objs[hdr.handle] == NULL); + + fl = $i(pioOpenWrite, drive, .path = path, + .permissions = req->permissions, + .exclusive = req->exclusive, + .err = &err); + if ($haserr(err)) + fio_send_pio_err(out, err); + else + { + hdr.size = 0; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + objs[hdr.handle] = $ref(fl.self); + } + break; + } case PIO_WRITE_ASYNC: { err_i err; @@ -3374,6 +3403,22 @@ fio_communicate(int in, int out) $iset(&async_errs[hdr.handle], err); break; } + case PIO_SEEK: + { + err_i err; + uint64_t offs; + + ft_assert(hdr.handle >= 0); 
+ ft_assert(objs[hdr.handle] != NULL); + ft_assert(hdr.size == sizeof(uint64_t)); + + memcpy(&offs, buf, sizeof(uint64_t)); + + err = $(pioSeek, objs[hdr.handle], offs); + if ($haserr(err)) + $iset(&async_errs[hdr.handle], err); + break; + } case PIO_GET_ASYNC_ERROR: { ft_assert(hdr.handle >= 0); @@ -3473,8 +3518,8 @@ typedef struct pioLocalWriteFile bool use_temp; bool renamed; } pioLocalWriteFile; -#define kls__pioLocalWriteFile iface__pioWriteCloser, mth(fobjDispose), \ - iface(pioWriteCloser) +#define kls__pioLocalWriteFile iface__pioDBWriter, mth(fobjDispose), \ + iface(pioWriteCloser, pioDBWriter) fobj_klass(pioLocalWriteFile); typedef struct pioRemoteFile @@ -3497,8 +3542,8 @@ typedef struct pioRemoteWriteFile { ft_str_t path; int handle; } pioRemoteWriteFile; -#define kls__pioRemoteWriteFile iface__pioWriteCloser, mth(fobjDispose), \ - iface(pioWriteCloser) +#define kls__pioRemoteWriteFile iface__pioDBWriter, mth(fobjDispose), \ + iface(pioWriteCloser, pioDBWriter) fobj_klass(pioRemoteWriteFile); typedef struct pioReadFilter { @@ -3706,6 +3751,55 @@ pioLocalDrive_pioOpenRewrite(VSelf, path_t path, int permissions, return $bind(pioWriteCloser, res); } +static pioDBWriter_i +pioLocalDrive_pioOpenWrite(VSelf, path_t path, int permissions, + bool exclusive, err_i *err) +{ + Self(pioLocalDrive); + int fd = -1; + FILE* fl; + ft_bytes_t buf; + fobj_t res; + int flags; + + fobj_reset_err(err); + + flags = O_CREAT|O_WRONLY|PG_BINARY; + if (exclusive) + flags |= O_EXCL; + + fd = open(path, flags, permissions); + + if (fd < 0) + { + *err = $syserr(errno, "Create file {path} failed", path(path)); + close(fd); + return $null(pioDBWriter); + } + + if (!exclusive && chmod(path, permissions)) + { + *err = $syserr(errno, "Changing permissions for {path} failed", + path(path)); + close(fd); + return $null(pioDBWriter); + } + + fl = fdopen(fd, "wb"); + ft_assert(fl != NULL); + + buf = ft_bytes_alloc(CHUNK_SIZE); + setvbuf(fl, buf.ptr, _IOFBF, buf.len); + + res = 
$alloc(pioLocalWriteFile, + .path = ft_strdupc(path), + .path_tmp = ft_strdupc(path), + .use_temp = false, + .fl = fl, + .buf = buf); + return $bind(pioDBWriter, res); +} + static pio_stat_t pioLocalDrive_pioStat(VSelf, path_t path, bool follow_symlink, err_i *err) { @@ -4113,6 +4207,19 @@ pioLocalWriteFile_pioWrite(VSelf, ft_bytes_t buf, err_i* err) return r; } +static err_i +pioLocalWriteFile_pioSeek(VSelf, off_t offs) +{ + Self(pioLocalWriteFile); + + ft_assert(self->fl != NULL, "Closed file abused \"%s\"", self->path.ptr); + + if (fseeko(self->fl, offs, SEEK_SET)) + return $syserr(errno, "Can not seek to {offs} in file {path:q}", offs(offs), path(self->path.ptr)); + + return $noerr(); +} + static err_i pioLocalWriteFile_pioWriteFinish(VSelf) { @@ -5070,6 +5177,54 @@ pioRemoteDrive_pioOpenRewrite(VSelf, path_t path, int permissions, return $bind(pioWriteCloser, fl); } +static pioDBWriter_i +pioRemoteDrive_pioOpenWrite(VSelf, path_t path, int permissions, + bool exclusive, err_i *err) +{ + Self(pioRemoteDrive); + ft_strbuf_t buf = ft_strbuf_zero(); + fobj_t fl; + int handle = find_free_handle(); + + fio_header hdr = { + .cop = PIO_OPEN_WRITE, + .handle = handle, + }; + + struct fio_req_open_write req = { + .permissions = permissions, + .exclusive = exclusive, + }; + + fio_ensure_remote(); + + ft_strbuf_catbytes(&buf, ft_bytes(&hdr, sizeof(hdr))); + ft_strbuf_catbytes(&buf, ft_bytes(&req, sizeof(req))); + ft_strbuf_catc(&buf, path); + ft_strbuf_cat1(&buf, '\0'); + + ((fio_header*)buf.ptr)->size = buf.len - sizeof(hdr); + + IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len), buf.len); + + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + + if (hdr.cop == FIO_PIO_ERROR) + { + *err = fio_receive_pio_err(&hdr); + return $null(pioDBWriter); + } + ft_dbg_assert(hdr.cop == PIO_OPEN_WRITE && + hdr.handle == handle); + + set_handle(handle); + + fl = $alloc(pioRemoteWriteFile, + .path = ft_strdupc(path), + .handle = handle); + return $bind(pioDBWriter, 
fl); +} + static size_t pioRemoteWriteFile_pioWrite(VSelf, ft_bytes_t buf, err_i* err) { @@ -5095,6 +5250,29 @@ pioRemoteWriteFile_pioWrite(VSelf, ft_bytes_t buf, err_i* err) return buf.len; } +static err_i +pioRemoteWriteFile_pioSeek(VSelf, off_t offs) +{ + Self(pioRemoteWriteFile); + struct __attribute__((packed)) { + fio_header hdr; + uint64_t off; + } req = { + .hdr = { + .cop = PIO_SEEK, + .handle = self->handle, + .size = sizeof(uint64_t), + }, + .off = offs, + }; + + ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->path.ptr); + + IO_CHECK(fio_write_all(fio_stdout, &req, sizeof(req)), sizeof(req)); + + return $noerr(); +} + static err_i pioRemoteWriteFile_pioWriteFinish(VSelf) { diff --git a/src/utils/file.h b/src/utils/file.h index 71710de19..e543871e8 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -76,7 +76,9 @@ typedef enum FIO_ITERATE_DATA, FIO_ITERATE_EOF, PIO_OPEN_REWRITE, + PIO_OPEN_WRITE, PIO_WRITE_ASYNC, + PIO_SEEK, PIO_GET_ASYNC_ERROR, PIO_CLOSE, PIO_DISPOSE, @@ -255,10 +257,12 @@ fobj_method(pioSeek); #define iface__pioFile mth(pioWrite, pioWriteFinish, pioRead, pioTruncate, pioClose, pioSeek) #define iface__pioWriteFlush mth(pioWrite, pioWriteFinish) #define iface__pioWriteCloser mth(pioWrite, pioWriteFinish, pioClose) +#define iface__pioDBWriter mth(pioWrite, pioSeek, pioWriteFinish, pioClose) #define iface__pioReadCloser mth(pioRead, pioClose) fobj_iface(pioFile); fobj_iface(pioWriteFlush); fobj_iface(pioWriteCloser); +fobj_iface(pioDBWriter); fobj_iface(pioReadCloser); // Pages iterator @@ -285,6 +289,9 @@ fobj_iface(pioPagesIterator); #define mth__pioOpen pioFile_i, (path_t, path), (int, flags), \ (int, permissions), (err_i *, err) #define mth__pioOpen__optional() (permissions, FILE_PERMISSION) +#define mth__pioOpenWrite pioDBWriter_i, (path_t, path), (int, permissions), \ + (bool, exclusive), (err_i *, err) +#define mth__pioOpenWrite__optional() (exclusive, false), (permissions, FILE_PERMISSION) #define 
mth__pioOpenRewrite pioWriteCloser_i, (path_t, path), (int, permissions), \ (bool, binary), (bool, use_temp), \ (err_i *, err) @@ -322,6 +329,7 @@ fobj_iface(pioPagesIterator); fobj_method(pioOpen); fobj_method(pioOpenRewrite); +fobj_method(pioOpenWrite); fobj_method(pioStat); fobj_method(pioRemove); fobj_method(pioRename); @@ -343,7 +351,7 @@ fobj_method(pioIteratePages); mth(pioOpenRewrite) fobj_iface(pioDrive); -#define iface__pioDBDrive iface__pioDrive, mth(pioIteratePages) +#define iface__pioDBDrive iface__pioDrive, mth(pioIteratePages, pioOpenWrite) fobj_iface(pioDBDrive); extern pioDrive_i pioDriveForLocation(fio_location location); From 2428adb7a50b0ef9ad9095dc16cad71d9e83112e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 08:29:49 +0300 Subject: [PATCH 172/339] [PBCKP-395] and looks pioDBWriter needs pioTruncate --- src/utils/file.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++ src/utils/file.h | 3 ++- 2 files changed, 61 insertions(+), 1 deletion(-) diff --git a/src/utils/file.c b/src/utils/file.c index 1ee7d84d4..cc2f30890 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3419,6 +3419,22 @@ fio_communicate(int in, int out) $iset(&async_errs[hdr.handle], err); break; } + case PIO_TRUNCATE: + { + err_i err; + uint64_t offs; + + ft_assert(hdr.handle >= 0); + ft_assert(objs[hdr.handle] != NULL); + ft_assert(hdr.size == sizeof(uint64_t)); + + memcpy(&offs, buf, sizeof(uint64_t)); + + err = $(pioTruncate, objs[hdr.handle], offs); + if ($haserr(err)) + $iset(&async_errs[hdr.handle], err); + break; + } case PIO_GET_ASYNC_ERROR: { ft_assert(hdr.handle >= 0); @@ -4232,6 +4248,25 @@ pioLocalWriteFile_pioWriteFinish(VSelf) return err; } +static err_i +pioLocalWriteFile_pioTruncate(VSelf, size_t sz) +{ + Self(pioLocalWriteFile); + ft_assert(self->fl != NULL, "Closed file abused \"%s\"", self->path_tmp.ptr); + + /* it is better to flush before we will truncate */ + if (fflush(self->fl)) + return $syserr(errno, "Cannot flush file 
{path:q}", + path(self->path_tmp.ptr)); + + if (ftruncate(fileno(self->fl), sz) < 0) + return $syserr(errno, "Cannot truncate file {path:q}", + path(self->path_tmp.ptr)); + /* TODO: what to do with file position? */ + + return $noerr(); +} + static err_i pioLocalWriteFile_pioClose(VSelf, bool sync) { @@ -5297,6 +5332,30 @@ pioRemoteWriteFile_pioWriteFinish(VSelf) return $noerr(); } +static err_i +pioRemoteWriteFile_pioTruncate(VSelf, size_t sz) +{ + Self(pioRemoteWriteFile); + + struct __attribute__((packed)) { + fio_header hdr; + uint64_t off; + } req = { + .hdr = { + .cop = PIO_TRUNCATE, + .handle = self->handle, + .size = sizeof(uint64_t), + }, + .off = sz, + }; + + ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->path.ptr); + + IO_CHECK(fio_write_all(fio_stdout, &req, sizeof(req)), sizeof(req)); + + return $noerr(); +} + static err_i pioRemoteWriteFile_pioClose(VSelf, bool sync) { diff --git a/src/utils/file.h b/src/utils/file.h index e543871e8..15e23bde8 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -79,6 +79,7 @@ typedef enum PIO_OPEN_WRITE, PIO_WRITE_ASYNC, PIO_SEEK, + PIO_TRUNCATE, PIO_GET_ASYNC_ERROR, PIO_CLOSE, PIO_DISPOSE, @@ -257,7 +258,7 @@ fobj_method(pioSeek); #define iface__pioFile mth(pioWrite, pioWriteFinish, pioRead, pioTruncate, pioClose, pioSeek) #define iface__pioWriteFlush mth(pioWrite, pioWriteFinish) #define iface__pioWriteCloser mth(pioWrite, pioWriteFinish, pioClose) -#define iface__pioDBWriter mth(pioWrite, pioSeek, pioWriteFinish, pioClose) +#define iface__pioDBWriter mth(pioWrite, pioSeek, pioWriteFinish, pioTruncate, pioClose) #define iface__pioReadCloser mth(pioRead, pioClose) fobj_iface(pioFile); fobj_iface(pioWriteFlush); From 662833bb374f11df3bb28586c3e0f91ab20dee73 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 08:34:22 +0300 Subject: [PATCH 173/339] [PBCKP-395] use pioOpenWrite in archive.c get_wal_file --- src/archive.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 
deletions(-) diff --git a/src/archive.c b/src/archive.c index 7f4ecf12f..3ef6be590 100644 --- a/src/archive.c +++ b/src/archive.c @@ -942,7 +942,7 @@ get_wal_file(const char *filename, const char *from_fullpath, const char *to_fullpath, bool prefetch_mode) { FOBJ_FUNC_ARP(); - pioFile_i out = {NULL}; + pioDBWriter_i out = {NULL}; pioFile_i in = {NULL}; char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */ err_i err = $noerr(); @@ -950,15 +950,15 @@ get_wal_file(const char *filename, const char *from_fullpath, bool compressed = false; bool src_partial = false; - pioDrive_i db_drive = pioDriveForLocation(FIO_DB_HOST); + pioDBDrive_i db_drive = pioDBDriveForLocation(FIO_DB_HOST); pioDrive_i backup_drive = pioDriveForLocation(FIO_BACKUP_HOST); snprintf(from_fullpath_gz, sizeof(from_fullpath_gz), "%s.gz", from_fullpath); /* open destination file */ - out = $i(pioOpen, db_drive, .path = to_fullpath, .err = &err, - .flags = O_WRONLY | O_CREAT | O_EXCL | O_TRUNC | PG_BINARY); + out = $i(pioOpenWrite, db_drive, .path = to_fullpath, .err = &err, + .exclusive = true); if ($haserr(err)) { elog(WARNING, "%s", $errmsg(err)); From 55fbf0b9549878e578ffe171d1a6a41336b9d4aa Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 08:40:55 +0300 Subject: [PATCH 174/339] [PBCKP-395] use pioOpenWrite in data.c copy_pages --- src/data.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/data.c b/src/data.c index ba8c3f6ea..df9fa05c1 100644 --- a/src/data.c +++ b/src/data.c @@ -317,7 +317,7 @@ backup_page(pioWrite_i out, BlockNumber blknum, ft_bytes_t page, /* Write page as-is. 
TODO: make it fastpath option in compress_and_backup_page() */ static int -write_page(pgFile *file, pioFile_i out, int blknum, Page page) +write_page(pgFile *file, pioDBWriter_i out, int blknum, Page page) { err_i err = $noerr(); off_t target = blknum * BLCKSZ; @@ -1875,7 +1875,7 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, pioDBDrive_i backup_location = pioDBDriveForLocation(FIO_BACKUP_HOST); err_i err = $noerr(); pioPagesIterator_i pages; - pioFile_i out; + pioDBWriter_i out; pages = doIteratePages(backup_location, .from_fullpath = from_fullpath, .file = file, .start_lsn = sync_lsn, @@ -1884,7 +1884,8 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, if ($haserr(err)) return $iresult(err); - out = $i(pioOpen, backup_location, to_fullpath, PG_BINARY|O_RDWR|O_CREAT, file->mode, &err); + out = $i(pioOpenWrite, backup_location, to_fullpath, + .permissions = file->mode, .err = &err); if ($haserr(err)) return $iresult(err); @@ -1917,8 +1918,13 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, if ($haserr(err)) return $iresult(err); - /* close local output file */ - $i(pioClose, out, true); + err = $i(pioWriteFinish, out); + if ($haserr(err)) + return $iresult(err); + + err = $i(pioClose, out, false); + if ($haserr(err)) + return $iresult(err); return $noerr(); } From e25f8ab1cc1d2417960dd2627e6cd5bf02378665 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 08:59:28 +0300 Subject: [PATCH 175/339] [PBCKP-395] intermediate step: forbid writing on pgFile All write activity is moved to pioOpenWrite and pioOpenRewrite. pgFile will be renamed to pgReader and pgReadStream.
--- src/utils/file.c | 215 ++--------------------------------------------- src/utils/file.h | 2 +- 2 files changed, 6 insertions(+), 211 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index cc2f30890..4d2738c06 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3551,7 +3551,7 @@ typedef struct pioRemoteFile ft_bytes_t chunkRest; } pioRemoteFile; #define kls__pioRemoteFile iface__pioFile, iface(pioFile), \ - mth(pioSetAsync, pioAsyncRead, pioAsyncWrite, pioAsyncError) + mth(pioSetAsync, pioAsyncRead) fobj_klass(pioRemoteFile); typedef struct pioRemoteWriteFile { @@ -3685,6 +3685,8 @@ pioLocalDrive_pioOpen(VSelf, path_t path, int flags, fobj_reset_err(err); fobj_t file; + ft_assert((flags & O_ACCMODE) == O_RDONLY); + if (permissions == 0) fd = open(path, flags, FILE_PERMISSION); else @@ -4134,34 +4136,6 @@ pioLocalFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) return r; } -static size_t -pioLocalFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) -{ - Self(pioLocalFile); - ssize_t r; - fobj_reset_err(err); - - ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); - - if (buf.len == 0) - return 0; - - r = durable_write(self->fd, buf.ptr, buf.len); - if (r < 0) - { - *err = $syserr(errno, "Cannot write to file {path:q}", - path(self->p.path)); - return 0; - } - if (r < buf.len) - { - *err = $err(SysErr, "Short write on {path:q}: {writtenSz} < {wantedSz}", - path(self->p.path), writtenSz(r), wantedSz(buf.len), - errNo(EIO)); - } - return r; -} - static err_i pioLocalFile_pioSeek(VSelf, off_t offs) { @@ -4177,27 +4151,6 @@ pioLocalFile_pioSeek(VSelf, off_t offs) return $noerr(); } -static err_i -pioLocalFile_pioWriteFinish(VSelf) -{ - Self(pioLocalFile); - ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); - /* do nothing for unbuffered file */ - return $noerr(); -} - -static err_i -pioLocalFile_pioTruncate(VSelf, size_t sz) -{ - Self(pioLocalFile); - ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); 
- - if (ftruncate(self->fd, sz) < 0) - return $syserr(errno, "Cannot truncate file {path:q}", - path(self->p.path)); - return $noerr(); -} - static fobjStr* pioLocalFile_fobjRepr(VSelf) { @@ -4362,6 +4315,8 @@ pioRemoteDrive_pioOpen(VSelf, path_t path, fobj_reset_err(err); fobj_t file; + ft_assert((flags & O_ACCMODE) == O_RDONLY); + handle = find_free_handle(); hdr.cop = FIO_OPEN; @@ -4739,28 +4694,6 @@ pioRemoteDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) /* REMOTE FILE */ -static err_i -pioRemoteFile_pioSync(VSelf) -{ - Self(pioRemoteFile); - - fio_header hdr; - hdr.cop = FIO_SYNC_FILE; - hdr.handle = self->handle; - hdr.arg = 0; - hdr.size = 0; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (hdr.arg != 0) - { - return $syserr((int)hdr.arg, "Cannot fsync remote file {path:q}", - path(self->p.path)); - } - return $noerr(); -} - static err_i pioRemoteFile_doClose(VSelf) { @@ -4801,9 +4734,6 @@ pioRemoteFile_pioClose(VSelf, bool sync) ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); - if (sync && (self->p.flags & O_ACCMODE) != O_RDONLY) - err = pioRemoteFile_pioSync(self); - return fobj_err_combine(err, pioRemoteFile_doClose(self)); } @@ -4965,45 +4895,6 @@ pioRemoteFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) return hdr.size; } -static size_t -pioRemoteFile_pioWrite(VSelf, ft_bytes_t buf, err_i *err) -{ - Self(pioRemoteFile); - fio_header hdr; - fobj_reset_err(err); - - ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); - - if (buf.len == 0) - return 0; - - if (self->asyncMode) - return pioAsyncWrite(self, buf, err); - - hdr = (fio_header){ - .cop = FIO_WRITE, - .handle = self->handle, - .size = buf.len, - .arg = 0, - }; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len), buf.len); - - /* check 
results */ - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - ft_dbg_assert(hdr.cop == FIO_WRITE); - - /* set errno */ - if (hdr.arg != 0) { - *err = $syserr((int)hdr.arg, "Cannot write remote file {path:q}", - path(self->p.path)); - return 0; - } - - return buf.len; -} - static err_i pioRemoteFile_pioSeek(VSelf, off_t offs) { @@ -5022,37 +4913,6 @@ pioRemoteFile_pioSeek(VSelf, off_t offs) return $noerr(); } -static err_i -pioRemoteFile_pioWriteFinish(VSelf) -{ - Self(pioRemoteFile); - - ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); - - if (self->asyncMode) - return pioAsyncError(self); - return $noerr(); -} - -static err_i -pioRemoteFile_pioTruncate(VSelf, size_t sz) -{ - Self(pioRemoteFile); - - ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); - - fio_header hdr = { - .cop = FIO_TRUNCATE, - .handle = self->handle, - .size = 0, - .arg = sz, - }; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - - return $noerr(); -} - static err_i pioRemoteFile_pioSetAsync(VSelf, bool async) { @@ -5062,80 +4922,15 @@ pioRemoteFile_pioSetAsync(VSelf, bool async) if (!self->asyncMode && async) { - if ((self->p.flags & O_ACCMODE) == O_RDWR) - return $err(RT, "Could not enable async mode on Read-Write file"); self->asyncMode = true; } else if (self->asyncMode && !async) { - err_i err = pioAsyncError(self); self->asyncMode = false; - return err; } return $noerr(); } -static size_t -pioRemoteFile_pioAsyncWrite(VSelf, ft_bytes_t buf, err_i *err) -{ - Self(pioRemoteFile); - fio_header hdr; - - ft_assert(self->handle >= 0, "Remote closed file abused \"%s\"", self->p.path); - - if ($haserr(self->asyncError)) { - *err = self->asyncError; - return 0; - } - - if (buf.len == 0) - return 0; - - hdr = (fio_header){ - .cop = FIO_WRITE_ASYNC, - .handle = self->handle, - .size = buf.len, - .arg = 0, - }; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - 
IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len), buf.len); - self->didAsync = true; - return buf.len; -} - -static err_i -pioRemoteFile_pioAsyncError(VSelf) -{ - Self(pioRemoteFile); - char *errmsg; - fio_header hdr; - - if ($haserr(self->asyncError) || !self->didAsync) - { - self->didAsync = false; - return self->asyncError; - } - - hdr.cop = FIO_GET_ASYNC_ERROR; - hdr.size = 0; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - - /* check results */ - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (hdr.size == 0) - return $noerr(); - - errmsg = pgut_malloc(ERRMSG_MAX_LEN); - IO_CHECK(fio_read_all(fio_stdin, errmsg, hdr.size), hdr.size); - self->asyncError = $err(SysErr, "{remotemsg}", remotemsg(errmsg)); - self->didAsync = false; - free(errmsg); - return self->asyncError; -} - static void pioRemoteFile_fobjDispose(VSelf) { diff --git a/src/utils/file.h b/src/utils/file.h index 15e23bde8..169cfc33e 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -255,7 +255,7 @@ fobj_method(pioTruncate); fobj_method(pioWriteFinish); fobj_method(pioSeek); -#define iface__pioFile mth(pioWrite, pioWriteFinish, pioRead, pioTruncate, pioClose, pioSeek) +#define iface__pioFile mth(pioRead, pioClose, pioSeek) #define iface__pioWriteFlush mth(pioWrite, pioWriteFinish) #define iface__pioWriteCloser mth(pioWrite, pioWriteFinish, pioClose) #define iface__pioDBWriter mth(pioWrite, pioSeek, pioWriteFinish, pioTruncate, pioClose) From 36e52ff9ab6a3adf845d8b92ce11842a468b20da Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 09:18:46 +0300 Subject: [PATCH 176/339] fix for pioLocalFileWriter --- src/utils/file.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 4d2738c06..e268c842d 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3532,7 +3532,7 @@ typedef struct pioLocalWriteFile FILE* fl; ft_bytes_t buf; bool use_temp; - bool 
renamed; + bool delete_in_dispose; } pioLocalWriteFile; #define kls__pioLocalWriteFile iface__pioDBWriter, mth(fobjDispose), \ iface(pioWriteCloser, pioDBWriter) @@ -3764,6 +3764,7 @@ pioLocalDrive_pioOpenRewrite(VSelf, path_t path, int permissions, .path = ft_strdupc(path), .path_tmp = temppath, .use_temp = use_temp, + .delete_in_dispose = true, .fl = fl, .buf = buf); return $bind(pioWriteCloser, res); @@ -3813,6 +3814,7 @@ pioLocalDrive_pioOpenWrite(VSelf, path_t path, int permissions, .path = ft_strdupc(path), .path_tmp = ft_strdupc(path), .use_temp = false, + .delete_in_dispose = exclusive, .fl = fl, .buf = buf); return $bind(pioDBWriter, res); @@ -4229,20 +4231,17 @@ pioLocalWriteFile_pioClose(VSelf, bool sync) fd = fileno(self->fl); + if (fflush(self->fl) != 0) + return $syserr(errno, "Flushing file {path:q}", + path(self->path_tmp.ptr)); + if (ferror(self->fl)) { fclose(self->fl); self->fl = NULL; - if (remove(self->path_tmp.ptr)) - return $syserr(errno, "Couldn't remove file {path:q}", - path(self->path_tmp.ptr)); return $noerr(); } - if (fflush(self->fl) != 0) - return $syserr(errno, "Flushing file {path:q}", - path(self->path_tmp.ptr)); - if (sync) { r = fsync(fd); @@ -4258,7 +4257,7 @@ pioLocalWriteFile_pioClose(VSelf, bool sync) old_path(self->path_tmp.ptr), new_path(self->path.ptr)); /* mark as renamed so fobjDispose will not delete it */ - self->renamed = true; + self->delete_in_dispose = false; if (sync) { @@ -4276,6 +4275,8 @@ pioLocalWriteFile_pioClose(VSelf, bool sync) path(self->path.ptr)); } } + else + self->delete_in_dispose = false; if (fclose(self->fl)) return $syserr(errno, "Cannot close file {path:q}", @@ -4294,7 +4295,7 @@ pioLocalWriteFile_fobjDispose(VSelf) fclose(self->fl); self->fl = NULL; } - if (self->use_temp && !self->renamed) + if (self->delete_in_dispose) { remove(self->path_tmp.ptr); } From ddb0d3329dae19bac2386f7f3a242b15a076a3af Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 09:26:01 +0300 Subject: [PATCH 
177/339] [PBCKP-395] force async error fetching in pioRemoteWriteFile_pioClose --- src/utils/file.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index e268c842d..c0e8213fc 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3557,6 +3557,7 @@ fobj_klass(pioRemoteFile); typedef struct pioRemoteWriteFile { ft_str_t path; int handle; + bool did_async; } pioRemoteWriteFile; #define kls__pioRemoteWriteFile iface__pioDBWriter, mth(fobjDispose), \ iface(pioWriteCloser, pioDBWriter) @@ -5078,6 +5079,8 @@ pioRemoteWriteFile_pioWrite(VSelf, ft_bytes_t buf, err_i* err) IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len), buf.len); + self->did_async = true; + return buf.len; } @@ -5101,6 +5104,8 @@ pioRemoteWriteFile_pioSeek(VSelf, off_t offs) IO_CHECK(fio_write_all(fio_stdout, &req, sizeof(req)), sizeof(req)); + self->did_async = true; + return $noerr(); } @@ -5121,6 +5126,8 @@ pioRemoteWriteFile_pioWriteFinish(VSelf) IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + self->did_async = false; + if (hdr.cop == FIO_PIO_ERROR) return fio_receive_pio_err(&hdr); ft_dbg_assert(hdr.cop == PIO_GET_ASYNC_ERROR); @@ -5149,6 +5156,8 @@ pioRemoteWriteFile_pioTruncate(VSelf, size_t sz) IO_CHECK(fio_write_all(fio_stdout, &req, sizeof(req)), sizeof(req)); + self->did_async = true; + return $noerr(); } @@ -5157,6 +5166,7 @@ pioRemoteWriteFile_pioClose(VSelf, bool sync) { Self(pioRemoteWriteFile); fio_header hdr; + err_i err = $noerr(); struct __attribute__((packed)) { fio_header hdr; bool sync; } req = { .hdr = { .cop = PIO_CLOSE, .handle = self->handle, .size = sizeof(bool), }, .sync = sync, }; @@ -5171,6 +5181,9 @@ ft_assert(self->handle >= 0); + if (self->did_async) + err = $(pioWriteFinish, self); + IO_CHECK(fio_write_all(fio_stdout, &req, sizeof(req)), sizeof(req)); IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); @@ -5178,8 +5191,8 @@
pioRemoteWriteFile_pioClose(VSelf, bool sync) self->handle = -1; if (hdr.cop == FIO_PIO_ERROR) - return fio_receive_pio_err(&hdr); - return $noerr(); + err = fobj_err_combine(err, fio_receive_pio_err(&hdr)); + return err; } static void From 7fa6986306ccf6663cf28a8a9df5703a190b7a1e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 09:59:46 +0300 Subject: [PATCH 178/339] [PBCKP-395] split pioOpen to pioOpenRead and pioOpenStream it simplifies pioCopy as well --- src/archive.c | 19 ++++----- src/catalog.c | 5 +-- src/utils/file.c | 106 +++++++++++++++++++++++------------------------ src/utils/file.h | 17 ++++---- 4 files changed, 71 insertions(+), 76 deletions(-) diff --git a/src/archive.c b/src/archive.c index 3ef6be590..548418da8 100644 --- a/src/archive.c +++ b/src/archive.c @@ -378,7 +378,7 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, uint32 archive_timeout, bool *skipped) { FOBJ_FUNC_ARP(); - pioFile_i in; + pioReadStream_i in; pioWriteCloser_i out; char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; @@ -446,7 +446,7 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, } /* Open source file for read */ - in = $i(pioOpen, db_drive, from_fullpath, O_RDONLY | PG_BINARY, .err = &err); + in = $i(pioOpenReadStream, db_drive, from_fullpath, .err = &err); if ($haserr(err)) return $iresult(err); @@ -943,7 +943,7 @@ get_wal_file(const char *filename, const char *from_fullpath, { FOBJ_FUNC_ARP(); pioDBWriter_i out = {NULL}; - pioFile_i in = {NULL}; + pioReadStream_i in = {NULL}; char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */ err_i err = $noerr(); char from_fullpath_gz[MAXPGPATH]; @@ -970,8 +970,7 @@ get_wal_file(const char *filename, const char *from_fullpath, /* If requested file is regular WAL segment, then try to open it with '.gz' suffix... 
*/ if (IsXLogFileName(filename)) { - in = $i(pioOpen, backup_drive, from_fullpath_gz, O_RDONLY | PG_BINARY, - .err = &err); + in = $i(pioOpenReadStream, backup_drive, from_fullpath_gz, .err = &err); compressed = in.self != NULL; if ($haserr(err) && getErrno(err) != ENOENT) elog(ERROR, "Source file: %s", $errmsg(err)); @@ -979,8 +978,7 @@ get_wal_file(const char *filename, const char *from_fullpath, #endif if (in.self == NULL) { - in = $i(pioOpen, backup_drive, from_fullpath, O_RDONLY | PG_BINARY, - .err = &err); + in = $i(pioOpenReadStream, backup_drive, from_fullpath, .err = &err); if ($haserr(err) && getErrno(err) != ENOENT) elog(ERROR, "Source file: %s", $errmsg(err)); } @@ -992,8 +990,7 @@ get_wal_file(const char *filename, const char *from_fullpath, snprintf(from_partial, sizeof(from_partial), "%s.gz.partial", from_fullpath); - in = $i(pioOpen, backup_drive, from_partial, O_RDONLY | PG_BINARY, - .err = &err); + in = $i(pioOpenReadStream, backup_drive, from_partial, .err = &err); compressed = in.self != NULL; if ($haserr(err) && getErrno(err) != ENOENT) elog(ERROR, "Source partial file: %s", $errmsg(err)); @@ -1003,9 +1000,7 @@ get_wal_file(const char *filename, const char *from_fullpath, { snprintf(from_partial, sizeof(from_partial), "%s.partial", from_fullpath); - in = $i(pioOpen, backup_drive, - .path = from_partial, - .flags = O_RDONLY | PG_BINARY, + in = $i(pioOpenReadStream, backup_drive, .path = from_partial, .err = &err); if ($haserr(err) && getErrno(err) != ENOENT) elog(ERROR, "Source partial file: %s", $errmsg(err)); diff --git a/src/catalog.c b/src/catalog.c index c57be6852..b241fe76f 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1020,12 +1020,11 @@ get_backup_filelist(pgBackup *backup, bool strict) line_reader = {0}; ft_bytes_t line; err_i err = $noerr(); - pioFile_i fl; + pioReadStream_i fl; join_path_components(backup_filelist_path, backup->root_dir, DATABASE_FILE_LIST); - fl = $i(pioOpen, backup->backup_location, .path = 
backup_filelist_path, - .flags = O_RDONLY, .err = &err); + fl = $i(pioOpenReadStream, backup->backup_location, .path = backup_filelist_path, .err = &err); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Opening backup filelist"); diff --git a/src/utils/file.c b/src/utils/file.c index c0e8213fc..982e40a94 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3511,7 +3511,6 @@ fobj_klass(pioRemoteDrive); typedef struct pioFile { const char *path; - int flags; bool closed; } pioFile; #define kls__pioFile mth(fobjDispose) @@ -3522,7 +3521,7 @@ typedef struct pioLocalFile pioFile p; int fd; } pioLocalFile; -#define kls__pioLocalFile iface__pioFile, iface(pioFile) +#define kls__pioLocalFile iface__pioReader, iface(pioReader, pioReadStream) fobj_klass(pioLocalFile); typedef struct pioLocalWriteFile @@ -3550,7 +3549,8 @@ typedef struct pioRemoteFile void* asyncChunk; ft_bytes_t chunkRest; } pioRemoteFile; -#define kls__pioRemoteFile iface__pioFile, iface(pioFile), \ +#define kls__pioRemoteFile iface__pioReader, \ + iface(pioReader, pioReadStream), \ mth(pioSetAsync, pioAsyncRead) fobj_klass(pioRemoteFile); @@ -3678,29 +3678,30 @@ common_pioExists(fobj_t self, path_t path, pio_file_kind_e expected_kind, err_i /* LOCAL DRIVE */ -static pioFile_i -pioLocalDrive_pioOpen(VSelf, path_t path, int flags, - int permissions, err_i *err) +static pioReader_i +pioLocalDrive_pioOpenRead(VSelf, path_t path, err_i *err) { int fd; fobj_reset_err(err); fobj_t file; - ft_assert((flags & O_ACCMODE) == O_RDONLY); - - if (permissions == 0) - fd = open(path, flags, FILE_PERMISSION); - else - fd = open(path, flags, permissions); + fd = open(path, O_RDONLY); if (fd < 0) { *err = $syserr(errno, "Cannot open file {path:q}", path(path)); - return (pioFile_i){NULL}; + return (pioReader_i){NULL}; } file = $alloc(pioLocalFile, .fd = fd, - .p = { .path = ft_cstrdup(path), .flags = flags } ); - return bind_pioFile(file); + .p = { .path = ft_cstrdup(path) } ); + return $bind(pioReader, file); +} + 
+static pioReadStream_i +pioLocalDrive_pioOpenReadStream(VSelf, path_t path, err_i *err) +{ + Self(pioLocalDrive); + return $reduce(pioReadStream, $(pioOpenRead, self, path, .err = err)); } static pioWriteCloser_i @@ -3970,9 +3971,8 @@ pioLocalDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { static ft_bytes_t pioLocalDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) { - FOBJ_FUNC_ARP(); Self(pioLocalDrive); - pioFile_i fl; + FILE* fl = NULL; pio_stat_t st; ft_bytes_t res = ft_bytes(NULL, 0); size_t amount; @@ -3982,7 +3982,6 @@ pioLocalDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) st = $(pioStat, self, .path = path, .follow_symlink = true, .err = err); if ($haserr(*err)) { - $iresult(*err); return res; } if (st.pst_kind != PIO_KIND_REGULAR) @@ -3990,7 +3989,6 @@ pioLocalDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) *err = $err(RT, "File {path:q} is not regular: {kind}", path(path), kind(pio_file_kind2str(st.pst_kind, path)), errNo(EACCES)); - $iresult(*err); return res; } @@ -3999,7 +3997,6 @@ pioLocalDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) { *err = $err(RT, "File {path:q} is too large: {size}", path(path), size(st.pst_size), errNo(EFBIG)); - $iresult(*err); return res; } @@ -4015,28 +4012,30 @@ pioLocalDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) * rely on "local file is read whole at once always". * Is it true? */ - fl = $(pioOpen, self, .path = path, .flags = O_RDONLY | (binary ? PG_BINARY : 0), - .err = err); - if ($haserr(*err)) + fl = fopen(path, binary ? 
"rb" : "r"); + if (fl == NULL) { - $iresult(*err); + *err = $syserr(errno, "Opening file {path:q}", path(path)); + ft_bytes_free(&res); return res; } - amount = pioReadFull($reduce(pioRead, fl), res, err); - if ($haserr(*err)) + amount = fread(res.ptr, 1, res.len, fl); + if (ferror(fl)) { + *err = $syserr(errno, "Opening file {path:q}", path(path)); + fclose(fl); ft_bytes_free(&res); - $iresult(*err); return res; } + fclose(fl); + if (amount != st.pst_size) { ft_bytes_free(&res); *err = $err(RT, "File {path:q} is truncated while reading", path(path), errNo(EBUSY)); - $iresult(*err); return res; } @@ -4044,7 +4043,6 @@ pioLocalDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) if (!binary) res.ptr[amount] = 0; - $i(pioClose, fl); return res; } @@ -4104,13 +4102,6 @@ pioLocalFile_pioClose(VSelf, bool sync) ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); - if (sync && (self->p.flags & O_ACCMODE) != O_RDONLY) - { - r = fsync(self->fd); - if (r < 0) - err = $syserr(errno, "Cannot fsync file {path:q}", - path(self->p.path)); - } r = close(self->fd); if (r < 0 && $isNULL(err)) err = $syserr(errno, "Cannot close file {path:q}", @@ -4307,24 +4298,20 @@ pioLocalWriteFile_fobjDispose(VSelf) /* REMOTE DRIVE */ -static pioFile_i -pioRemoteDrive_pioOpen(VSelf, path_t path, - int flags, int permissions, - err_i *err) +static pioReader_i +pioRemoteDrive_pioOpenRead(VSelf, path_t path, err_i *err) { int handle; fio_header hdr; fobj_reset_err(err); fobj_t file; - ft_assert((flags & O_ACCMODE) == O_RDONLY); - handle = find_free_handle(); hdr.cop = FIO_OPEN; hdr.handle = handle; hdr.size = strlen(path) + 1; - hdr.arg = flags; + hdr.arg = O_RDONLY; set_handle(handle); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); @@ -4338,11 +4325,30 @@ pioRemoteDrive_pioOpen(VSelf, path_t path, *err = $syserr((int)hdr.arg, "Cannot open remote file {path:q}", path(path)); unset_handle(hdr.handle); - return (pioFile_i){NULL}; + return 
(pioReader_i){NULL}; } file = $alloc(pioRemoteFile, .handle = handle, - .p = { .path = ft_cstrdup(path), .flags = flags }); - return bind_pioFile(file); + .p = { .path = ft_cstrdup(path) }); + return $bind(pioReader, file); +} + +static pioReadStream_i +pioRemoteDrive_pioOpenReadStream(VSelf, path_t path, err_i *err) +{ + Self(pioRemoteDrive); + + pioReader_i fl = $(pioOpenRead, self, path, err); + if ($haserr(*err)) + return $null(pioReadStream); + + *err = $(pioSetAsync, fl.self, true); + if ($haserr(*err)) + { + $idel(&fl); + return $null(pioReadStream); + } + + return $reduce(pioReadStream, fl); } static pio_stat_t @@ -5851,14 +5857,6 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, if (copied == NULL) copied = &_fallback_copied; - if ($ifdef(err = , pioSetAsync, src.self) && $haserr(err)) - elog(ERROR, "Cannot enable async mode on source \"%s\": %s", - $irepr(src), $errmsg(err)); - - if ($ifdef(err = , pioSetAsync, dest.self) && $haserr(err)) - elog(ERROR, "Cannot enable async mode on destination \"%s\": %s", - $irepr(dest), $errmsg(err)); - for (i = nfilters - 1; i >= 0; i--) dest = pioWrapWriteFilter(dest, filters[i], OUT_BUF_SIZE); diff --git a/src/utils/file.h b/src/utils/file.h index 169cfc33e..5cfc6ee4d 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -255,12 +255,14 @@ fobj_method(pioTruncate); fobj_method(pioWriteFinish); fobj_method(pioSeek); -#define iface__pioFile mth(pioRead, pioClose, pioSeek) +#define iface__pioReader mth(pioRead, pioClose, pioSeek) +#define iface__pioReadStream mth(pioRead, pioClose) #define iface__pioWriteFlush mth(pioWrite, pioWriteFinish) #define iface__pioWriteCloser mth(pioWrite, pioWriteFinish, pioClose) #define iface__pioDBWriter mth(pioWrite, pioSeek, pioWriteFinish, pioTruncate, pioClose) #define iface__pioReadCloser mth(pioRead, pioClose) -fobj_iface(pioFile); +fobj_iface(pioReader); +fobj_iface(pioReadStream); fobj_iface(pioWriteFlush); fobj_iface(pioWriteCloser); fobj_iface(pioDBWriter); @@ -287,9 
+289,8 @@ fobj_method(pioFinalPageN); fobj_iface(pioPagesIterator); // Drive -#define mth__pioOpen pioFile_i, (path_t, path), (int, flags), \ - (int, permissions), (err_i *, err) -#define mth__pioOpen__optional() (permissions, FILE_PERMISSION) +#define mth__pioOpenRead pioReader_i, (path_t, path), (err_i *, err) +#define mth__pioOpenReadStream pioReadStream_i, (path_t, path), (err_i *, err) #define mth__pioOpenWrite pioDBWriter_i, (path_t, path), (int, permissions), \ (bool, exclusive), (err_i *, err) #define mth__pioOpenWrite__optional() (exclusive, false), (permissions, FILE_PERMISSION) @@ -328,7 +329,8 @@ fobj_iface(pioPagesIterator); (CompressAlg, calg), (int, clevel), \ (uint32, checksum_version), (bool, just_validate), (err_i*, err) -fobj_method(pioOpen); +fobj_method(pioOpenRead); +fobj_method(pioOpenReadStream); fobj_method(pioOpenRewrite); fobj_method(pioOpenWrite); fobj_method(pioStat); @@ -345,7 +347,8 @@ fobj_method(pioReadFile); fobj_method(pioWriteFile); fobj_method(pioIteratePages); -#define iface__pioDrive mth(pioOpen, pioStat, pioRemove, pioRename), \ +#define iface__pioDrive mth(pioOpenRead, pioOpenReadStream), \ + mth(pioStat, pioRemove, pioRename), \ mth(pioExists, pioGetCRC32, pioIsRemote), \ mth(pioMakeDir, pioListDir, pioRemoveDir), \ mth(pioFilesAreSame, pioReadFile, pioWriteFile), \ From 42ef001b9de96a18d5db3349ebd935ecae31dfc4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 10:55:42 +0300 Subject: [PATCH 179/339] add pioWriteCompressed to pioDBWriter --- src/utils/file.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++ src/utils/file.h | 6 ++++- 2 files changed, 66 insertions(+), 1 deletion(-) diff --git a/src/utils/file.c b/src/utils/file.c index 982e40a94..eb7d090b8 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3403,6 +3403,19 @@ fio_communicate(int in, int out) $iset(&async_errs[hdr.handle], err); break; } + case PIO_WRITE_COMPRESSED_ASYNC: + { + err_i err; + + ft_assert(hdr.handle >= 0); + 
ft_assert(objs[hdr.handle] != NULL); + + err = $(pioWriteCompressed, objs[hdr.handle], ft_bytes(buf, hdr.size), + .compress_alg = hdr.arg); + if ($haserr(err)) + $iset(&async_errs[hdr.handle], err); + break; + } case PIO_SEEK: { err_i err; @@ -4170,6 +4183,35 @@ pioLocalWriteFile_pioWrite(VSelf, ft_bytes_t buf, err_i* err) return r; } +static err_i +pioLocalWriteFile_pioWriteCompressed(VSelf, ft_bytes_t buf, CompressAlg compress_alg) +{ + Self(pioLocalWriteFile); + err_i err; + char decbuf[BLCKSZ]; + const char *errormsg = NULL; + int32 uncompressed_size; + + ft_assert(buf.len != 0); + + uncompressed_size = do_decompress(decbuf, BLCKSZ, buf.ptr, buf.len, + compress_alg, &errormsg); + if (errormsg != NULL) + { + return $err(RT, "An error occured during decompressing block for {path:q}: {causeStr}", + path(self->path.ptr), causeStr(errormsg)); + } + + if (uncompressed_size != BLCKSZ) + { + return $err(RT, "Page uncompressed to {size} bytes != BLCKSZ (for {path:q})", + path(self->path.ptr), size(uncompressed_size)); + } + + $(pioWrite, self, ft_bytes(decbuf, BLCKSZ), .err = &err); + return err; +} + static err_i pioLocalWriteFile_pioSeek(VSelf, off_t offs) { @@ -5090,6 +5132,25 @@ pioRemoteWriteFile_pioWrite(VSelf, ft_bytes_t buf, err_i* err) return buf.len; } +static err_i +pioRemoteWriteFile_pioWriteCompressed(VSelf, ft_bytes_t buf, CompressAlg compress_alg) +{ + Self(pioRemoteWriteFile); + fio_header hdr = { + .cop = PIO_WRITE_COMPRESSED_ASYNC, + .handle = self->handle, + .size = buf.len, + .arg = compress_alg, + }; + + ft_assert(self->handle >= 0); + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len), buf.len); + + return $noerr(); +} + static err_i pioRemoteWriteFile_pioSeek(VSelf, off_t offs) { diff --git a/src/utils/file.h b/src/utils/file.h index 5cfc6ee4d..94e04da32 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -78,6 +78,7 @@ typedef enum PIO_OPEN_REWRITE, PIO_OPEN_WRITE, 
PIO_WRITE_ASYNC, + PIO_WRITE_COMPRESSED_ASYNC, PIO_SEEK, PIO_TRUNCATE, PIO_GET_ASYNC_ERROR, @@ -244,6 +245,7 @@ fobj_error_cstr_key(gzErrStr); #define mth__pioClose__optional() (sync, false) #define mth__pioRead size_t, (ft_bytes_t, buf), (err_i *, err) #define mth__pioWrite size_t, (ft_bytes_t, buf), (err_i *, err) +#define mth__pioWriteCompressed err_i, (ft_bytes_t, buf), (CompressAlg, compress_alg) #define mth__pioTruncate err_i, (size_t, sz) #define mth__pioWriteFinish err_i #define mth__pioSeek err_i, (off_t, offs) @@ -251,6 +253,7 @@ fobj_error_cstr_key(gzErrStr); fobj_method(pioClose); fobj_method(pioRead); fobj_method(pioWrite); +fobj_method(pioWriteCompressed); fobj_method(pioTruncate); fobj_method(pioWriteFinish); fobj_method(pioSeek); @@ -259,7 +262,8 @@ fobj_method(pioSeek); #define iface__pioReadStream mth(pioRead, pioClose) #define iface__pioWriteFlush mth(pioWrite, pioWriteFinish) #define iface__pioWriteCloser mth(pioWrite, pioWriteFinish, pioClose) -#define iface__pioDBWriter mth(pioWrite, pioSeek, pioWriteFinish, pioTruncate, pioClose) +#define iface__pioDBWriter mth(pioWrite, pioSeek, pioWriteCompressed), \ + mth(pioWriteFinish, pioTruncate, pioClose) #define iface__pioReadCloser mth(pioRead, pioClose) fobj_iface(pioReader); fobj_iface(pioReadStream); From 1f773d6d54ac19ed2917d266c893d9bf59b862ef Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 11:09:33 +0300 Subject: [PATCH 180/339] change pioWrite signature since it always return error on short write and we don't use short write in any meaningful way. 
--- src/catalog.c | 2 +- src/data.c | 14 ++++---- src/utils/file.c | 85 +++++++++++++++++++----------------------------- src/utils/file.h | 2 +- 4 files changed, 44 insertions(+), 59 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index b241fe76f..aa5351e47 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2562,7 +2562,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, ft_strbuf_catf(&line, "}\n"); - $i(pioWrite, wrapped, ft_bytes(line.ptr, line.len), &err); + err = $i(pioWrite, wrapped, ft_bytes(line.ptr, line.len)); ft_strbuf_reset_for_reuse(&line); diff --git a/src/data.c b/src/data.c index df9fa05c1..c8f890ab4 100644 --- a/src/data.c +++ b/src/data.c @@ -307,12 +307,16 @@ backup_page(pioWrite_i out, BlockNumber blknum, ft_bytes_t page, bph.block = blknum; bph.compressed_size = page.len; - n = $i(pioWrite, out, .buf = ft_bytes(&bph, sizeof(bph)), .err = err); + *err = $i(pioWrite, out, .buf = ft_bytes(&bph, sizeof(bph))); if ($haserr(*err)) - return n; + return 0; + n = sizeof(bph); /* write data page */ - return n + $i(pioWrite, out, .buf = page, .err = err); + *err = $i(pioWrite, out, .buf = page); + if ($noerr(*err)) + n += page.len; + return n; } /* Write page as-is. 
TODO: make it fastpath option in compress_and_backup_page() */ @@ -321,17 +325,15 @@ write_page(pgFile *file, pioDBWriter_i out, int blknum, Page page) { err_i err = $noerr(); off_t target = blknum * BLCKSZ; - size_t rc; err = $i(pioSeek, out, target); if ($haserr(err)) ft_logerr(FT_ERROR, $errmsg(err), "write_page"); /* write data page */ - rc = $i(pioWrite, out, .buf = ft_bytes(page, BLCKSZ), .err = &err); + err = $i(pioWrite, out, .buf = ft_bytes(page, BLCKSZ)); if ($haserr(err)) ft_log(FT_INFO, $errmsg(err), "write_page"); - ft_assert(rc == BLCKSZ); file->write_size += BLCKSZ; file->uncompressed_size += BLCKSZ; diff --git a/src/utils/file.c b/src/utils/file.c index eb7d090b8..e65b646b2 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3397,8 +3397,7 @@ fio_communicate(int in, int out) ft_assert(hdr.handle >= 0); ft_assert(objs[hdr.handle] != NULL); - $(pioWrite, objs[hdr.handle], ft_bytes(buf, hdr.size), - .err = &err); + err = $(pioWrite, objs[hdr.handle], ft_bytes(buf, hdr.size)); if ($haserr(err)) $iset(&async_errs[hdr.handle], err); break; @@ -4081,7 +4080,7 @@ pioLocalDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) if ($haserr(err)) return $iresult(err); - $i(pioWrite, fl, content, .err = &err); + err = $i(pioWrite, fl, content); if ($haserr(err)) return $iresult(err); @@ -4166,28 +4165,26 @@ pioLocalFile_fobjRepr(VSelf) (path, $S(self->p.path)), (fd, $I(self->fd))); } -static size_t -pioLocalWriteFile_pioWrite(VSelf, ft_bytes_t buf, err_i* err) +static err_i +pioLocalWriteFile_pioWrite(VSelf, ft_bytes_t buf) { Self(pioLocalWriteFile); - fobj_reset_err(err); size_t r; if (buf.len == 0) - return 0; + return $noerr(); r = fwrite(buf.ptr, 1, buf.len, self->fl); if (r < buf.len) - *err = $syserr(errno, "Writting file {path:q}", + return $syserr(errno, "Writting file {path:q}", path(self->path_tmp.ptr)); - return r; + return $noerr(); } static err_i pioLocalWriteFile_pioWriteCompressed(VSelf, ft_bytes_t buf, CompressAlg 
compress_alg) { Self(pioLocalWriteFile); - err_i err; char decbuf[BLCKSZ]; const char *errormsg = NULL; int32 uncompressed_size; @@ -4208,8 +4205,7 @@ pioLocalWriteFile_pioWriteCompressed(VSelf, ft_bytes_t buf, CompressAlg compress path(self->path.ptr), size(uncompressed_size)); } - $(pioWrite, self, ft_bytes(decbuf, BLCKSZ), .err = &err); - return err; + return $(pioWrite, self, ft_bytes(decbuf, BLCKSZ)); } static err_i @@ -5105,17 +5101,16 @@ pioRemoteDrive_pioOpenWrite(VSelf, path_t path, int permissions, return $bind(pioDBWriter, fl); } -static size_t -pioRemoteWriteFile_pioWrite(VSelf, ft_bytes_t buf, err_i* err) +static err_i +pioRemoteWriteFile_pioWrite(VSelf, ft_bytes_t buf) { Self(pioRemoteWriteFile); - fobj_reset_err(err); fio_header hdr; ft_assert(self->handle >= 0); if (buf.len == 0) - return 0; + return $noerr(); hdr = (fio_header){ .cop = PIO_WRITE_ASYNC, @@ -5129,7 +5124,7 @@ pioRemoteWriteFile_pioWrite(VSelf, ft_bytes_t buf, err_i* err) self->did_async = true; - return buf.len; + return $noerr(); } static err_i @@ -5433,24 +5428,21 @@ pioWrapWriteFilter(pioWriteFlush_i fl, pioFilter_i flt, size_t buf_size) return bind_pioWriteFlush(wrap); } -static size_t -pioWriteFilter_pioWrite(VSelf, ft_bytes_t rbuf, err_i *err) +static err_i +pioWriteFilter_pioWrite(VSelf, ft_bytes_t rbuf) { Self(pioWriteFilter); - fobj_reset_err(err); + err_i err = $noerr(); pioFltTransformResult tr; size_t rlen = rbuf.len; ft_bytes_t wbuf; - size_t r; if ($notNULL(self->inplace)) { - *err = $i(pioFltInPlace, self->inplace, rbuf); - if ($haserr(*err)) - return 0; - r = $i(pioWrite, self->wrapped, rbuf, err); - ft_bytes_consume(&rbuf, r); - return rlen - rbuf.len; + err = $i(pioFltInPlace, self->inplace, rbuf); + if ($haserr(err)) + return err; + return $i(pioWrite, self->wrapped, rbuf); } while (rbuf.len > 0) @@ -5458,9 +5450,9 @@ pioWriteFilter_pioWrite(VSelf, ft_bytes_t rbuf, err_i *err) wbuf = ft_bytes(self->buffer, self->capa); while (wbuf.len > 0) { - tr = 
$i(pioFltTransform, self->filter, rbuf, wbuf, err); - if ($haserr(*err)) - return rlen - rbuf.len; + tr = $i(pioFltTransform, self->filter, rbuf, wbuf, &err); + if ($haserr(err)) + return err; ft_bytes_consume(&rbuf, tr.consumed); ft_bytes_consume(&wbuf, tr.produced); @@ -5475,18 +5467,17 @@ pioWriteFilter_pioWrite(VSelf, ft_bytes_t rbuf, err_i *err) ft_dbg_assert(rbuf.len == 0); break; } - r = $i(pioWrite, self->wrapped, wbuf, err); - if ($haserr(*err)) - return rlen - rbuf.len; - ft_assert(r == wbuf.len); + err = $i(pioWrite, self->wrapped, wbuf); + if ($haserr(err)) + return err; } if (rbuf.len) { - *err = $err(SysErr, "short write: {writtenSz} < {wantedSz}", + return $err(SysErr, "short write: {writtenSz} < {wantedSz}", writtenSz(rlen - rbuf.len), wantedSz(rbuf.len)); } - return rlen - rbuf.len; + return $noerr(); } static err_i @@ -5519,10 +5510,9 @@ pioWriteFilter_pioWriteFinish(VSelf) break; ft_assert(wbuf.len > 0); - r = $i(pioWrite, self->wrapped, wbuf, &err); + err = $i(pioWrite, self->wrapped, wbuf); if ($haserr(err)) return err; - ft_assert(r == wbuf.len); } return $i(pioWriteFinish, self->wrapped); @@ -5876,11 +5866,10 @@ pioDevNull_alloc(void) return bind_pioWriteFlush(wrap); } -static size_t -pioDevNull_pioWrite(VSelf, ft_bytes_t buf, err_i *err) +static err_i +pioDevNull_pioWrite(VSelf, ft_bytes_t buf) { - fobj_reset_err(err); - return buf.len; + return $noerr(); } static err_i @@ -5926,21 +5915,15 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, while (!$haserr(rerr) && !$haserr(werr)) { size_t read_len = 0; - size_t write_len = 0; read_len = $i(pioRead, src, ft_bytes(buf, OUT_BUF_SIZE), &rerr); if (read_len == 0) break; - write_len = $i(pioWrite, dest, ft_bytes(buf, read_len), &werr); - *copied += write_len; - if (write_len != read_len) - { - werr = $err(SysErr, "Short write to destination file {path}: {writtenSz} < {wantedSz}", - path($irepr(dest)), - wantedSz(read_len), writtenSz(write_len)); - } + werr = $i(pioWrite, dest, 
ft_bytes(buf, read_len)); + if ($noerr(werr)) + *copied += read_len; } err = fobj_err_combine(rerr, werr); diff --git a/src/utils/file.h b/src/utils/file.h index 94e04da32..58b8b3f10 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -244,7 +244,7 @@ fobj_error_cstr_key(gzErrStr); #define mth__pioClose err_i, (bool, sync) #define mth__pioClose__optional() (sync, false) #define mth__pioRead size_t, (ft_bytes_t, buf), (err_i *, err) -#define mth__pioWrite size_t, (ft_bytes_t, buf), (err_i *, err) +#define mth__pioWrite err_i, (ft_bytes_t, buf) #define mth__pioWriteCompressed err_i, (ft_bytes_t, buf), (CompressAlg, compress_alg) #define mth__pioTruncate err_i, (size_t, sz) #define mth__pioWriteFinish err_i From 3f18ffab8bf9a5e552f0ed074ab4869b5166efc1 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 15:59:22 +0300 Subject: [PATCH 181/339] fix pioWriteCompressed --- src/utils/file.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/utils/file.c b/src/utils/file.c index e65b646b2..fc0f5257f 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5143,6 +5143,8 @@ pioRemoteWriteFile_pioWriteCompressed(VSelf, ft_bytes_t buf, CompressAlg compres IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len), buf.len); + self->did_async = true; + return $noerr(); } From f3a5de561af794f7019f9b4e231fe24f823f596b Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 14 Dec 2022 16:13:25 +0300 Subject: [PATCH 182/339] merge_test: duplicate fio_remove breakpoints to pioRemove__do --- tests/merge_test.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/merge_test.py b/tests/merge_test.py index 59ba3d9c6..66ccad78e 100644 --- a/tests/merge_test.py +++ b/tests/merge_test.py @@ -1132,6 +1132,7 @@ def test_continue_failed_merge_2(self): gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True) gdb.set_breakpoint('fio_remove') + gdb.set_breakpoint('pioRemove__do') 
gdb.run_until_break() @@ -1612,6 +1613,7 @@ def test_failed_merge_after_delete(self): gdb.run_until_break() gdb.set_breakpoint('fio_remove') + gdb.set_breakpoint('pioRemove__do') gdb.continue_execution_until_break(20) gdb._execute('signal SIGKILL') @@ -1695,6 +1697,7 @@ def test_failed_merge_after_delete_1(self): # gdb.continue_execution_until_break() gdb.set_breakpoint('fio_remove') + gdb.set_breakpoint('pioRemove__do') gdb.continue_execution_until_break(30) gdb._execute('signal SIGKILL') @@ -1758,6 +1761,7 @@ def test_failed_merge_after_delete_2(self): gdb.set_breakpoint('delete_backup_files') gdb.run_until_break() gdb.set_breakpoint('fio_remove') + gdb.set_breakpoint('pioRemove__do') gdb.run_until_break() gdb.continue_execution_until_break(2) gdb._execute('signal SIGKILL') @@ -1835,6 +1839,7 @@ def test_failed_merge_after_delete_3(self): gdb.run_until_break() gdb.set_breakpoint('fio_remove') + gdb.set_breakpoint('pioRemove__do') gdb.continue_execution_until_break(20) gdb._execute('signal SIGKILL') From 4f153eb70fbbf73b461a0d0ab1bd62442ec392ce Mon Sep 17 00:00:00 2001 From: "s.fukanchik" Date: Wed, 14 Dec 2022 23:39:18 +0300 Subject: [PATCH 183/339] PBCKP-403 use pio in write_page_headers --- src/backup.c | 5 +---- src/data.c | 56 ++++++++++++++++++++++++---------------------- src/merge.c | 13 ++--------- src/pg_probackup.h | 4 +--- 4 files changed, 33 insertions(+), 45 deletions(-) diff --git a/src/backup.c b/src/backup.c index 91fdc4a1b..0a56f765c 100644 --- a/src/backup.c +++ b/src/backup.c @@ -523,12 +523,9 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, } /* close and sync page header map */ - if (current.hdr_map.fp) + if ($notNULL(current.hdr_map.fp)) { cleanup_header_map(&(current.hdr_map)); - - if (fio_sync(FIO_BACKUP_HOST, current.hdr_map.path) != 0) - elog(ERROR, "Cannot sync file \"%s\": %s", current.hdr_map.path, strerror(errno)); } /* close ssh session in main thread */ diff --git a/src/data.c b/src/data.c index 
c8f890ab4..eeec497ab 100644 --- a/src/data.c +++ b/src/data.c @@ -2046,8 +2046,10 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b void write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, bool is_merge) { + FOBJ_FUNC_ARP(); + pioDBDrive_i drive = pioDBDriveForLocation(FIO_BACKUP_HOST); + err_i err = $noerr(); size_t read_len = 0; - char *map_path = NULL; /* header compression */ int z_len = 0; char *zheaders = NULL; @@ -2057,7 +2059,6 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, return; /* when running merge we must write headers into temp map */ - map_path = (is_merge) ? hdr_map->path_tmp : hdr_map->path; read_len = (file->n_headers + 1) * sizeof(BackupPageHeader2); /* calculate checksums */ @@ -2075,23 +2076,17 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, /* writing to header map must be serialized */ pthread_lock(&(hdr_map->mutex)); /* what if we crash while trying to obtain mutex? 
*/ - if (!hdr_map->fp) + if ($isNULL(hdr_map->fp)) { - elog(LOG, "Creating page header map \"%s\"", map_path); - - hdr_map->fp = fopen(map_path, PG_BINARY_A); - if (hdr_map->fp == NULL) - elog(ERROR, "Cannot open header file \"%s\": %s", - map_path, strerror(errno)); - - /* enable buffering for header file */ - hdr_map->buf = pgut_malloc(LARGE_CHUNK_SIZE); - setvbuf(hdr_map->fp, hdr_map->buf, _IOFBF, LARGE_CHUNK_SIZE); + elog(LOG, "Creating page header map \"%s\"", hdr_map->path); - /* update file permission */ - if (chmod(map_path, FILE_PERMISSION) == -1) - elog(ERROR, "Cannot change mode of \"%s\": %s", map_path, - strerror(errno)); + hdr_map->fp = $iref( $i(pioOpenRewrite, drive, .path = hdr_map->path, + .permissions = FILE_PERMISSION, .binary = true, + .use_temp = is_merge, &err) ); + if ($haserr(err)) + { + ft_logerr(FT_FATAL, $errmsg(err), "opening header map for write"); + } file->hdr_off = 0; } @@ -2111,8 +2106,9 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, elog(VERBOSE, "Writing headers for file \"%s\" offset: %llu, len: %i, crc: %u", file->rel_path, file->hdr_off, z_len, file->hdr_crc); - if (fwrite(zheaders, 1, z_len, hdr_map->fp) != z_len) - elog(ERROR, "Cannot write to file \"%s\": %s", map_path, strerror(errno)); + err = $i(pioWrite, hdr_map->fp, .buf = ft_bytes(zheaders, z_len)); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "writing header map"); file->hdr_size = z_len; /* save the length of compressed headers */ hdr_map->offset += z_len; /* update current offset in map */ @@ -2126,21 +2122,27 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, void init_header_map(pgBackup *backup) { - backup->hdr_map.fp = NULL; - backup->hdr_map.buf = NULL; + $setNULL(&backup->hdr_map.fp); + join_path_components(backup->hdr_map.path, backup->root_dir, HEADER_MAP); - join_path_components(backup->hdr_map.path_tmp, backup->root_dir, HEADER_MAP_TMP); backup->hdr_map.mutex = 
(pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER; } void cleanup_header_map(HeaderMap *hdr_map) { + FOBJ_FUNC_ARP(); + err_i err = $noerr(); + /* cleanup descriptor */ - if (hdr_map->fp && fclose(hdr_map->fp)) - elog(ERROR, "Cannot close file \"%s\"", hdr_map->path); - hdr_map->fp = NULL; + if ($notNULL(hdr_map->fp)) + { + err = $i(pioClose, hdr_map->fp, .sync = true); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "closing header map"); + $idel(&hdr_map->fp); + $setNULL(&hdr_map->fp); + } + hdr_map->offset = 0; - pg_free(hdr_map->buf); - hdr_map->buf = NULL; } diff --git a/src/merge.c b/src/merge.c index 1182cc15d..eaab1c2ec 100644 --- a/src/merge.c +++ b/src/merge.c @@ -434,6 +434,7 @@ merge_chain(InstanceState *instanceState, parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup, bool no_validate, bool no_sync) { + FOBJ_FUNC_ARP(); int i; char full_external_prefix[MAXPGPATH]; char full_database_dir[MAXPGPATH]; @@ -704,19 +705,9 @@ merge_chain(InstanceState *instanceState, pretty_time); /* If temp header map is open, then close it and make rename */ - if (full_backup->hdr_map.fp) + if ($notNULL(full_backup->hdr_map.fp)) { cleanup_header_map(&(full_backup->hdr_map)); - - /* sync new header map to disk */ - if (fio_sync(FIO_BACKUP_HOST, full_backup->hdr_map.path_tmp) != 0) - elog(ERROR, "Cannot sync temp header map \"%s\": %s", - full_backup->hdr_map.path_tmp, strerror(errno)); - - /* Replace old header map with new one */ - if (rename(full_backup->hdr_map.path_tmp, full_backup->hdr_map.path)) - elog(ERROR, "Could not rename file \"%s\" to \"%s\": %s", - full_backup->hdr_map.path_tmp, full_backup->hdr_map.path, strerror(errno)); } /* Close page header maps */ diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 209eb5511..2e972b1ff 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -386,9 +386,7 @@ typedef struct PGNodeInfo typedef struct HeaderMap { char path[MAXPGPATH]; - char path_tmp[MAXPGPATH]; /* used only in merge */ - FILE 
*fp; /* used only for writing */ - char *buf; /* buffer */ + pioWriteCloser_i fp; /* used only for writing */ pg_off_t offset; /* current position in fp */ pthread_mutex_t mutex; From 482c5d993305f6e60753935651d42c86f7110d93 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 01:01:40 +0300 Subject: [PATCH 184/339] fix pioTruncate/pioSeek signature: it is better to accept uint64_t --- src/utils/file.c | 12 ++++++------ src/utils/file.h | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index fc0f5257f..1e2f87cfa 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -4143,7 +4143,7 @@ pioLocalFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) } static err_i -pioLocalFile_pioSeek(VSelf, off_t offs) +pioLocalFile_pioSeek(VSelf, uint64_t offs) { Self(pioLocalFile); @@ -4209,7 +4209,7 @@ pioLocalWriteFile_pioWriteCompressed(VSelf, ft_bytes_t buf, CompressAlg compress } static err_i -pioLocalWriteFile_pioSeek(VSelf, off_t offs) +pioLocalWriteFile_pioSeek(VSelf, uint64_t offs) { Self(pioLocalWriteFile); @@ -4234,7 +4234,7 @@ pioLocalWriteFile_pioWriteFinish(VSelf) } static err_i -pioLocalWriteFile_pioTruncate(VSelf, size_t sz) +pioLocalWriteFile_pioTruncate(VSelf, uint64_t sz) { Self(pioLocalWriteFile); ft_assert(self->fl != NULL, "Closed file abused \"%s\"", self->path_tmp.ptr); @@ -4942,7 +4942,7 @@ pioRemoteFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) } static err_i -pioRemoteFile_pioSeek(VSelf, off_t offs) +pioRemoteFile_pioSeek(VSelf, uint64_t offs) { Self(pioRemoteFile); fio_header hdr; @@ -5149,7 +5149,7 @@ pioRemoteWriteFile_pioWriteCompressed(VSelf, ft_bytes_t buf, CompressAlg compres } static err_i -pioRemoteWriteFile_pioSeek(VSelf, off_t offs) +pioRemoteWriteFile_pioSeek(VSelf, uint64_t offs) { Self(pioRemoteWriteFile); struct __attribute__((packed)) { @@ -5200,7 +5200,7 @@ pioRemoteWriteFile_pioWriteFinish(VSelf) } static err_i -pioRemoteWriteFile_pioTruncate(VSelf, size_t sz) 
+pioRemoteWriteFile_pioTruncate(VSelf, uint64_t sz) { Self(pioRemoteWriteFile); diff --git a/src/utils/file.h b/src/utils/file.h index 58b8b3f10..7191e0c3b 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -246,9 +246,9 @@ fobj_error_cstr_key(gzErrStr); #define mth__pioRead size_t, (ft_bytes_t, buf), (err_i *, err) #define mth__pioWrite err_i, (ft_bytes_t, buf) #define mth__pioWriteCompressed err_i, (ft_bytes_t, buf), (CompressAlg, compress_alg) -#define mth__pioTruncate err_i, (size_t, sz) +#define mth__pioTruncate err_i, (uint64_t, sz) #define mth__pioWriteFinish err_i -#define mth__pioSeek err_i, (off_t, offs) +#define mth__pioSeek err_i, (uint64_t, offs) fobj_method(pioClose); fobj_method(pioRead); From f8286da56899c56e7ec84b731fb1480c0e75acc9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 02:15:22 +0300 Subject: [PATCH 185/339] [PBCKP-404] use pioDBWriter for restore_data_file/restore_non_data_file --- src/data.c | 92 +++++++++++++++++++++++++--------------------- src/merge.c | 21 ++++++----- src/pg_probackup.h | 10 +---- src/restore.c | 60 +++++++++++------------------- src/utils/file.h | 2 +- 5 files changed, 86 insertions(+), 99 deletions(-) diff --git a/src/data.c b/src/data.c index eeec497ab..48c7902e2 100644 --- a/src/data.c +++ b/src/data.c @@ -45,6 +45,13 @@ static err_i copy_pages(const char *to_fullpath, const char *from_fullpath, pgFi XLogRecPtr sync_lsn, uint32 checksum_version, BackupMode backup_mode); +static size_t restore_data_file_internal(FILE *in, pioDBWriter_i out, pgFile *file, uint32 backup_version, + const char *from_fullpath, const char *to_fullpath, int nblocks, + datapagemap_t *map, PageState *checksum_map, int checksum_version, + datapagemap_t *lsn_map, BackupPageHeader2 *headers); +static void restore_non_data_file_internal(FILE *in, pioDBWriter_i out, pgFile *file, + const char *from_fullpath, const char *to_fullpath); + #ifdef HAVE_LIBZ /* Implementation of zlib compression method */ static int32 @@ 
-569,13 +576,14 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, * Apply changed blocks to destination file from every backup in parent chain. */ size_t -restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out, +restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, const char *to_fullpath, bool use_bitmap, PageState *checksum_map, XLogRecPtr shift_lsn, datapagemap_t *lsn_map, bool use_headers) { size_t total_write_len = 0; char *in_buf = pgut_malloc(STDIO_BUFSIZE); int backup_seq = 0; + err_i err; /* * FULL -> INCR -> DEST @@ -682,6 +690,13 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out, } pg_free(in_buf); + if (dest_file->n_blocks > 0) /* old binary's backups didn't have n_blocks field */ + { + err = $i(pioTruncate, out, .size = (int64_t)dest_file->n_blocks * BLCKSZ); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Could not truncate datafile"); + } + return total_write_len; } @@ -695,7 +710,7 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out, * marked as already restored, then page is skipped. */ size_t -restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_version, +restore_data_file_internal(FILE *in, pioDBWriter_i out, pgFile *file, uint32 backup_version, const char *from_fullpath, const char *to_fullpath, int nblocks, datapagemap_t *map, PageState *checksum_map, int checksum_version, datapagemap_t *lsn_map, BackupPageHeader2 *headers) @@ -705,6 +720,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers size_t write_len = 0; off_t cur_pos_out = 0; off_t cur_pos_in = 0; + err_i err = $noerr(); /* should not be possible */ Assert(!(backup_version >= 20400 && file->n_headers <= 0)); @@ -719,9 +735,9 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers * a lot when blocks from incremental backup are restored, * but should never happen in case of blocks from FULL backup. 
*/ - if (fio_fseek(out, cur_pos_out) < 0) - elog(ERROR, "Cannot seek block %u of \"%s\": %s", - blknum, to_fullpath, strerror(errno)); + err = $i(pioSeek, out, cur_pos_out); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Cannot seek block %u"); for (;;) { @@ -811,16 +827,9 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers elog(VERBOSE, "Truncate file \"%s\" to block %u", to_fullpath, blknum); - /* To correctly truncate file, we must first flush STDIO buffers */ - if (fio_fflush(out) != 0) - elog(ERROR, "Cannot flush file \"%s\": %s", to_fullpath, strerror(errno)); - - /* Set position to the start of file */ - if (fio_fseek(out, 0) < 0) - elog(ERROR, "Cannot seek to the start of file \"%s\": %s", to_fullpath, strerror(errno)); - - if (fio_ftruncate(out, blknum * BLCKSZ) != 0) - elog(ERROR, "Cannot truncate file \"%s\": %s", to_fullpath, strerror(errno)); + err = $i(pioTruncate, out, (uint64_t)blknum * BLCKSZ); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), ""); break; } @@ -910,9 +919,10 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers if (cur_pos_out != write_pos) { - if (fio_fseek(out, write_pos) < 0) - elog(ERROR, "Cannot seek block %u of \"%s\": %s", - blknum, to_fullpath, strerror(errno)); + err = $i(pioSeek, out, write_pos); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Cannot seek block %u", + blknum); cur_pos_out = write_pos; } @@ -922,20 +932,13 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers * send compressed page to the remote side. 
*/ if (is_compressed) - { - ssize_t rc; - rc = fio_fwrite_async_compressed(out, page.data, compressed_size, file->compress_alg); - - if (!fio_is_remote_file(out) && rc != BLCKSZ) - elog(ERROR, "Cannot write block %u of \"%s\": %s, size: %u", - blknum, to_fullpath, strerror(errno), compressed_size); - } + err = $i(pioWriteCompressed, out, ft_bytes(page.data, compressed_size), + .compress_alg = file->compress_alg); else - { - if (fio_fwrite_async(out, page.data, BLCKSZ) != BLCKSZ) - elog(ERROR, "Cannot write block %u of \"%s\": %s", - blknum, to_fullpath, strerror(errno)); - } + err = $i(pioWrite, out, ft_bytes(page.data, BLCKSZ)); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Cannot write block %u", + blknum); write_len += BLCKSZ; cur_pos_out += BLCKSZ; /* update current write position */ @@ -955,9 +958,10 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers * it is either small control file or already compressed cfs file. */ void -restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file, +restore_non_data_file_internal(FILE *in, pioDBWriter_i out, pgFile *file, const char *from_fullpath, const char *to_fullpath) { + err_i err; size_t read_len = 0; char *buf = pgut_malloc(STDIO_BUFSIZE); /* 64kB buffer */ @@ -978,9 +982,9 @@ restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file, if (read_len > 0) { - if (fio_fwrite_async(out, buf, read_len) != read_len) - elog(ERROR, "Cannot write to \"%s\": %s", to_fullpath, - strerror(errno)); + err = $i(pioWrite, out, ft_bytes(buf, read_len)); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), ""); } if (feof(in)) @@ -994,12 +998,13 @@ restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file, size_t restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, - pgFile *dest_file, FILE *out, const char *to_fullpath, + pgFile *dest_file, pioDBWriter_i out, const char *to_fullpath, bool already_exists) { char from_root[MAXPGPATH]; char 
from_fullpath[MAXPGPATH]; FILE *in = NULL; + err_i err; pgFile *tmp_file = NULL; pgBackup *tmp_backup = NULL; @@ -1043,9 +1048,12 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, if (tmp_file->write_size == 0) { /* In case of incremental restore truncate file just to be safe */ - if (already_exists && fio_ftruncate(out, 0)) - elog(ERROR, "Cannot truncate file \"%s\": %s", - to_fullpath, strerror(errno)); + if (already_exists) + { + err = $i(pioTruncate, out, 0); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), ""); + } return 0; } @@ -1090,9 +1098,9 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, } /* Checksum mismatch, truncate file and overwrite it */ - if (fio_ftruncate(out, 0)) - elog(ERROR, "Cannot truncate file \"%s\": %s", - to_fullpath, strerror(errno)); + err = $i(pioTruncate, out, 0); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), ""); } if (tmp_file->external_dir_num == 0) diff --git a/src/merge.c b/src/merge.c index eaab1c2ec..eeda3e528 100644 --- a/src/merge.c +++ b/src/merge.c @@ -917,12 +917,14 @@ merge_chain(InstanceState *instanceState, static void * merge_files(void *arg) { + FOBJ_FUNC_ARP(); int i; merge_files_arg *arguments = (merge_files_arg *) arg; size_t n_files = parray_num(arguments->dest_backup->files); for (i = 0; i < n_files; i++) { + FOBJ_LOOP_ARP(); pgFile *dest_file = (pgFile *) parray_get(arguments->dest_backup->files, i); pgFile *tmp_file; bool in_place = false; /* keep file as it is */ @@ -1176,11 +1178,13 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, const char *full_database_dir, bool use_bitmap, bool is_retry, bool no_sync) { - FILE *out = NULL; char *buffer = pgut_malloc(STDIO_BUFSIZE); char to_fullpath[MAXPGPATH]; char to_fullpath_tmp1[MAXPGPATH]; /* used for restore */ char to_fullpath_tmp2[MAXPGPATH]; /* used for backup */ + pioDBDrive_i drive = pioDBDriveForLocation(FIO_BACKUP_HOST); + pioDBWriter_i out; + err_i err; /* The next possible 
optimization is copying "as is" the file * from intermediate incremental backup, that didn`t changed in @@ -1193,20 +1197,19 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, snprintf(to_fullpath_tmp2, MAXPGPATH, "%s_tmp2", to_fullpath); /* open temp file */ - out = fopen(to_fullpath_tmp1, PG_BINARY_W); - if (out == NULL) - elog(ERROR, "Cannot open merge target file \"%s\": %s", - to_fullpath_tmp1, strerror(errno)); - setvbuf(out, buffer, _IOFBF, STDIO_BUFSIZE); + out = $i(pioOpenWrite, drive, to_fullpath_tmp1, .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Open merge target file"); /* restore file into temp file */ tmp_file->size = restore_data_file(parent_chain, dest_file, out, to_fullpath_tmp1, use_bitmap, NULL, InvalidXLogRecPtr, NULL, /* when retrying merge header map cannot be trusted */ is_retry ? false : true); - if (fclose(out) != 0) - elog(ERROR, "Cannot close file \"%s\": %s", - to_fullpath_tmp1, strerror(errno)); + + err = $i(pioClose, out, .sync = false); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Closing target file"); pg_free(buffer); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 2e972b1ff..ddb5d4c74 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1040,18 +1040,12 @@ extern void backup_non_data_file_internal(const char *from_fullpath, const char *to_fullpath, pgFile *file, bool missing_ok); -extern size_t restore_data_file(parray *parent_chain, pgFile *dest_file, FILE *out, +extern size_t restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, const char *to_fullpath, bool use_bitmap, PageState *checksum_map, XLogRecPtr shift_lsn, datapagemap_t *lsn_map, bool use_headers); -extern size_t restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_version, - const char *from_fullpath, const char *to_fullpath, int nblocks, - datapagemap_t *map, PageState *checksum_map, int checksum_version, - datapagemap_t *lsn_map, BackupPageHeader2 
*headers); extern size_t restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, - pgFile *dest_file, FILE *out, const char *to_fullpath, + pgFile *dest_file, pioDBWriter_i out, const char *to_fullpath, bool already_exists); -extern void restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file, - const char *from_fullpath, const char *to_fullpath); extern bool create_empty_file(const char *to_root, fio_location to_location, pgFile *file); extern PageState *get_checksum_map(const char *fullpath, uint32 checksum_version, diff --git a/src/restore.c b/src/restore.c index c098e7ea9..ed6e718d2 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1115,23 +1115,28 @@ restore_chain(InstanceState *instanceState, static void * restore_files(void *arg) { + FOBJ_FUNC_ARP(); int i; size_t n_files; char to_fullpath[MAXPGPATH]; - FILE *out = NULL; - char *out_buf = pgut_malloc(STDIO_BUFSIZE); + + pioDBWriter_i out; + pioDBDrive_i db_drive; restore_files_arg *arguments = (restore_files_arg *) arg; n_files = parray_num(arguments->dest_files); + db_drive = pioDBDriveForLocation(FIO_DB_HOST); + for (i = 0; i < parray_num(arguments->dest_files); i++) { + FOBJ_LOOP_ARP(); bool already_exists = false; PageState *checksum_map = NULL; /* it should take ~1.5MB at most */ datapagemap_t *lsn_map = NULL; /* it should take 16kB at most */ - char *errmsg = NULL; /* remote agent error message */ pgFile *dest_file = (pgFile *) parray_get(arguments->dest_files, i); + err_i err = $noerr(); /* Directories were created before */ if (dest_file->kind == PIO_KIND_DIRECTORY) @@ -1228,28 +1233,17 @@ restore_files(void *arg) } } - /* - * Open dest file and truncate it to zero, if destination - * file already exists and dest file size is zero, or - * if file do not exist - */ - if ((already_exists && dest_file->write_size == 0) || !already_exists) - out = fio_fopen(FIO_DB_HOST, to_fullpath, PG_BINARY_W); - /* - * If file already exists and dest size is not zero, - * then open it for reading 
and writing. - */ - else - out = fio_fopen(FIO_DB_HOST, to_fullpath, PG_BINARY_R "+"); + out = $i(pioOpenWrite, db_drive, .path = to_fullpath, + .permissions = dest_file->mode, .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Cannot open restore target file"); - if (out == NULL) - elog(ERROR, "Cannot open restore target file \"%s\": %s", - to_fullpath, strerror(errno)); - - /* update file permission */ - if (fio_chmod(FIO_DB_HOST, to_fullpath, dest_file->mode) == -1) - elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath, - strerror(errno)); + if (already_exists && dest_file->write_size == 0) + { + err = $i(pioTruncate, out, .size = 0); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Cannot truncate datafile"); + } if (!dest_file->is_datafile || dest_file->is_cfs) elog(LOG, "Restoring non-data file: \"%s\"", to_fullpath); @@ -1263,9 +1257,6 @@ restore_files(void *arg) /* Restore destination file */ if (dest_file->is_datafile && !dest_file->is_cfs) { - /* enable stdio buffering for local destination data file */ - if (!fio_is_remote_file(out)) - setvbuf(out, out_buf, _IOFBF, STDIO_BUFSIZE); /* Destination file is data file */ arguments->restored_bytes += restore_data_file(arguments->parent_chain, dest_file, out, to_fullpath, @@ -1274,9 +1265,6 @@ restore_files(void *arg) } else { - /* disable stdio buffering for local destination non-data file */ - if (!fio_is_remote_file(out)) - setvbuf(out, NULL, _IONBF, BUFSIZ); /* Destination file is non-data file */ arguments->restored_bytes += restore_non_data_file(arguments->parent_chain, arguments->dest_backup, dest_file, out, to_fullpath, @@ -1284,14 +1272,10 @@ restore_files(void *arg) } done: - /* Writing is asynchronous in case of restore in remote mode, so check the agent status */ - if (fio_check_error_file(out, &errmsg)) - elog(ERROR, "Cannot write to the remote file \"%s\": %s", to_fullpath, errmsg); - /* close file */ - if (fio_fclose(out) != 0) - elog(ERROR, "Cannot close file 
\"%s\": %s", to_fullpath, - strerror(errno)); + err = $i(pioClose, out, .sync = false); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Close restored file"); /* free pagemap used for restore optimization */ pg_free(dest_file->pagemap.bitmap); @@ -1303,8 +1287,6 @@ restore_files(void *arg) pg_free(checksum_map); } - free(out_buf); - /* ssh connection to longer needed */ fio_disconnect(); diff --git a/src/utils/file.h b/src/utils/file.h index 7191e0c3b..34a4e0fe1 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -246,7 +246,7 @@ fobj_error_cstr_key(gzErrStr); #define mth__pioRead size_t, (ft_bytes_t, buf), (err_i *, err) #define mth__pioWrite err_i, (ft_bytes_t, buf) #define mth__pioWriteCompressed err_i, (ft_bytes_t, buf), (CompressAlg, compress_alg) -#define mth__pioTruncate err_i, (uint64_t, sz) +#define mth__pioTruncate err_i, (uint64_t, size) #define mth__pioWriteFinish err_i #define mth__pioSeek err_i, (uint64_t, offs) From 36335b6047cbd7d056d2233b5463634d7400dc5a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 05:45:34 +0300 Subject: [PATCH 186/339] remove pioReadFull looks like all pioRead methods already tries to read all possible data --- src/utils/file.c | 23 +---------------------- src/utils/file.h | 2 -- 2 files changed, 1 insertion(+), 24 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 1e2f87cfa..07d2cd7d8 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5307,7 +5307,7 @@ pioReadFilter_pioRead(VSelf, ft_bytes_t wbuf, err_i *err) if ($notNULL(self->inplace) && !self->eof) { - r = pioReadFull(self->wrapped, wbuf, err); + r = $i(pioRead, self->wrapped, wbuf, err); if (r > 0) { err_i flterr; @@ -5940,27 +5940,6 @@ pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, return $iresult(err); } -size_t -pioReadFull(pioRead_i src, ft_bytes_t bytes, err_i* err) -{ - ft_bytes_t b; - size_t r; - fobj_reset_err(err); - - b = bytes; - while (b.len) - { - r = $i(pioRead, src, b, err); - Assert(r <= 
b.len); - ft_bytes_consume(&b, r); - if ($haserr(*err)) - break; - if (r == 0) - break; - } - return bytes.len - b.len; -} - void init_pio_line_reader(pio_line_reader *r, pioRead_i source, size_t max_length) { r->source = $iref(source); diff --git a/src/utils/file.h b/src/utils/file.h index 34a4e0fe1..e5a4776c3 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -443,8 +443,6 @@ extern err_i pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, pioCopyWithFilters((dest), (src), _fltrs_, ft_arrsz(_fltrs_), NULL); \ }) -extern size_t pioReadFull(pioRead_i src, ft_bytes_t bytes, err_i* err); - typedef struct pio_line_reader pio_line_reader; struct pio_line_reader { pioRead_i source; From ad178a80d0467de949aa03f5df2c22faad722f0d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 06:51:45 +0300 Subject: [PATCH 187/339] [PBCKP-404] use pioReader in restore_data_file and validate_file_pages reorganize common code in those functions into backup_page_iterator. --- src/data.c | 538 +++++++++++++++++++++++++---------------------------- 1 file changed, 250 insertions(+), 288 deletions(-) diff --git a/src/data.c b/src/data.c index 48c7902e2..3093b3149 100644 --- a/src/data.c +++ b/src/data.c @@ -33,8 +33,29 @@ typedef struct DataPage char data[BLCKSZ]; } DataPage; -static bool get_page_header(FILE *in, const char *fullpath, BackupPageHeader *bph, - pg_crc32 *crc, uint32 backup_version); +typedef struct backup_page_iterator { + /* arguments */ + const char* fullpath; + pioReader_i in; + BackupPageHeader2 *headers; + int n_headers; + uint32_t backup_version; + CompressAlg compress_alg; + + /* iterator value */ + int64_t cur_pos; + int64_t read_pos; + BlockNumber blknum; + XLogRecPtr page_lsn; + uint16_t page_crc; + uint32_t n_hdr; + bool truncated; + bool is_compressed; + ft_bytes_t whole_read; + ft_bytes_t read_to; + ft_bytes_t compressed; + DataPage page; +} backup_page_iterator; static err_i send_pages(const char *to_fullpath, const char *from_fullpath, 
pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, @@ -45,7 +66,7 @@ static err_i copy_pages(const char *to_fullpath, const char *from_fullpath, pgFi XLogRecPtr sync_lsn, uint32 checksum_version, BackupMode backup_mode); -static size_t restore_data_file_internal(FILE *in, pioDBWriter_i out, pgFile *file, uint32 backup_version, +static size_t restore_data_file_internal(pioReader_i in, pioDBWriter_i out, pgFile *file, uint32 backup_version, const char *from_fullpath, const char *to_fullpath, int nblocks, datapagemap_t *map, PageState *checksum_map, int checksum_version, datapagemap_t *lsn_map, BackupPageHeader2 *headers); @@ -153,7 +174,7 @@ do_decompress(void *dst, size_t dst_size, void const *src, size_t src_size, * But at least we will do this check only for pages which will no pass validation step. */ static bool -page_may_be_compressed(Page page, CompressAlg alg, uint32 backup_version) +page_may_be_compressed(Page page, CompressAlg alg) { PageHeader phdr; @@ -169,12 +190,6 @@ page_may_be_compressed(Page page, CompressAlg alg, uint32 backup_version) phdr->pd_special <= BLCKSZ && phdr->pd_special == MAXALIGN(phdr->pd_special))) { - /* ... 
end only if it is invalid, then do more checks */ - if (backup_version >= 20023) - { - /* Versions 2.0.23 and higher don't have such bug */ - return false; - } #ifdef HAVE_LIBZ /* For zlib we can check page magic: * https://stackoverflow.com/questions/9050260/what-does-a-zlib-header-look-like @@ -584,6 +599,7 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, char *in_buf = pgut_malloc(STDIO_BUFSIZE); int backup_seq = 0; err_i err; + pioDrive_i backup_drive = pioDriveForLocation(FIO_BACKUP_HOST); /* * FULL -> INCR -> DEST @@ -602,9 +618,10 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, // for (i = 0; i < parray_num(parent_chain); i++) while (backup_seq >= 0 && backup_seq < parray_num(parent_chain)) { + FOBJ_LOOP_ARP(); char from_root[MAXPGPATH]; char from_fullpath[MAXPGPATH]; - FILE *in = NULL; + pioReader_i in; pgFile **res_file = NULL; pgFile *tmp_file = NULL; @@ -648,13 +665,9 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, join_path_components(from_root, backup->root_dir, DATABASE_DIR); join_path_components(from_fullpath, from_root, tmp_file->rel_path); - in = fopen(from_fullpath, PG_BINARY_R); - if (in == NULL) - elog(ERROR, "Cannot open backup file \"%s\": %s", from_fullpath, - strerror(errno)); - - /* set stdio buffering for input data file */ - setvbuf(in, in_buf, _IOFBF, STDIO_BUFSIZE); + in = $i(pioOpenRead, backup_drive, from_fullpath, .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Open backup file"); /* get headers for this file */ if (use_headers && tmp_file->n_headers > 0) @@ -680,9 +693,7 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, backup->stop_lsn <= shift_lsn ? 
lsn_map : NULL, headers); - if (fclose(in) != 0) - elog(ERROR, "Cannot close file \"%s\": %s", from_fullpath, - strerror(errno)); + $i(pioClose, in); pg_free(headers); @@ -700,6 +711,146 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, return total_write_len; } +static bool +backup_page_next(backup_page_iterator *it) +{ + size_t read_len; + int32_t compressed_size; + err_i err; + + + it->truncated = false; + /* newer backups have headers in separate storage */ + if (it->headers) + { + BackupPageHeader2* hd; + uint32_t n_hdr = it->n_hdr; + if (n_hdr >= it->n_headers) + return false; + it->n_hdr++; + + hd = &it->headers[n_hdr]; + it->blknum = hd->block; + it->page_lsn = hd->lsn; + it->page_crc = hd->checksum; + + ft_assert(hd->pos >= 0); + ft_assert((hd+1)->pos > hd->pos + sizeof(BackupPageHeader)); + it->read_pos = hd->pos; + + /* calculate payload size by comparing current and next page positions */ + read_len = (hd+1)->pos - hd->pos; + it->read_to = ft_bytes(&it->page, read_len); + compressed_size = read_len - sizeof(BackupPageHeader); + ft_assert(compressed_size <= BLCKSZ); + it->whole_read = ft_bytes(&it->page, read_len); + } + else + { + /* We get into this branch either when restoring old backup + * or when merging something. Align read_len only when restoring + * or merging old backups. 
+ */ + read_len = $i(pioRead, it->in, ft_bytes(&it->page.bph, sizeof(it->page.bph)), + .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Reading block header"); + if (read_len == 0) /* end of file */ + return false; + if (read_len != sizeof(it->page.bph)) + ft_log(FT_FATAL, "Cannot read header at offset %lld of \"%s\"", + (long long)it->cur_pos, it->fullpath); + if (it->page.bph.block == 0 && it->page.bph.compressed_size == 0) + ft_log(FT_FATAL, "Empty block in file \"%s\"", it->fullpath); + + it->cur_pos += sizeof(BackupPageHeader); + it->read_pos = it->cur_pos; + it->blknum = it->page.bph.block; + compressed_size = it->page.bph.compressed_size; + if (compressed_size == PageIsTruncated) + { + it->truncated = true; + compressed_size = 0; + } + ft_assert(compressed_size >= 0 && compressed_size <= BLCKSZ); + it->page_lsn = 0; + it->page_crc = 0; + + /* this has a potential to backfire when retrying merge of old backups, + * so we just forbid the retrying of failed merges between versions >= 2.4.0 and + * version < 2.4.0 + */ + if (it->backup_version >= 20400) + read_len = compressed_size; + else + /* For some unknown and possibly dump reason I/O operations + * in versions < 2.4.0 were always aligned to 8 bytes. + * Now we have to deal with backward compatibility. 
+ */ + read_len = MAXALIGN(compressed_size); + it->read_to = ft_bytes(&it->page.data, read_len); + it->whole_read = ft_bytes(&it->page, + sizeof(BackupPageHeader) + read_len); + } + + it->compressed = ft_bytes(&it->page.data, compressed_size); + return true; +} + +static err_i +backup_page_read(backup_page_iterator *it) +{ + err_i err; + size_t read_len; + + if (it->read_pos != it->cur_pos) + { + err = $i(pioSeek, it->in, it->read_pos); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Cannot seek block %u", + it->blknum); + it->cur_pos = it->read_pos; + } + + read_len = $i(pioRead, it->in, it->read_to, &err); + if ($haserr(err)) + return $err(RT, "Cannot read block {blknum} of file {path}: {cause}", + blknum(it->blknum), cause(err.self), path(it->fullpath)); + if (read_len != it->read_to.len) + return $err(RT, "Short read of block {blknum} of file {path}", + blknum(it->blknum), path(it->fullpath)); + it->cur_pos += read_len; + + it->is_compressed = it->compressed.len != BLCKSZ; + /* + * Compression skip magic part 2: + * if page size is smaller than BLCKSZ, decompress the page. + * BUGFIX for versions < 2.0.23: if page size is equal to BLCKSZ. + * we have to check, whether it is compressed or not using + * page_may_be_compressed() function. + */ + if (!it->is_compressed && it->backup_version < 20023 && + page_may_be_compressed(it->compressed.ptr, it->compress_alg)) + { + it->is_compressed = true; + } + return $noerr(); +} + +static err_i +backup_page_skip(backup_page_iterator *it) +{ + if (it->headers != NULL) + return $noerr(); + + /* Backward compatibility kludge TODO: remove in 3.0 + * go to the next page. + */ + it->cur_pos += it->read_to.len; + it->read_pos = it->cur_pos; + return $i(pioSeek, it->in, it->cur_pos); +} + /* Restore block from "in" file to "out" file. * If "nblocks" is greater than zero, then skip restoring blocks, * whose position if greater than "nblocks". 
@@ -710,18 +861,24 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, * marked as already restored, then page is skipped. */ size_t -restore_data_file_internal(FILE *in, pioDBWriter_i out, pgFile *file, uint32 backup_version, +restore_data_file_internal(pioReader_i in, pioDBWriter_i out, pgFile *file, uint32 backup_version, const char *from_fullpath, const char *to_fullpath, int nblocks, datapagemap_t *map, PageState *checksum_map, int checksum_version, datapagemap_t *lsn_map, BackupPageHeader2 *headers) { - BlockNumber blknum = 0; - int n_hdr = -1; size_t write_len = 0; off_t cur_pos_out = 0; - off_t cur_pos_in = 0; err_i err = $noerr(); + backup_page_iterator iter = { + .fullpath = from_fullpath, + .in = in, + .headers = headers, + .n_headers = file->n_headers, + .backup_version = backup_version, + .compress_alg = file->compress_alg, + }; + /* should not be possible */ Assert(!(backup_version >= 20400 && file->n_headers <= 0)); @@ -739,73 +896,14 @@ restore_data_file_internal(FILE *in, pioDBWriter_i out, pgFile *file, uint32 bac if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Cannot seek block %u"); - for (;;) + while (backup_page_next(&iter)) { off_t write_pos; - size_t len; - size_t read_len; - DataPage page; - int32 compressed_size = 0; - bool is_compressed = false; - - /* incremental restore vars */ - uint16 page_crc = 0; - XLogRecPtr page_lsn = InvalidXLogRecPtr; /* check for interrupt */ if (interrupted || thread_interrupted) elog(ERROR, "Interrupted during data file restore"); - /* newer backups have headers in separate storage */ - if (headers) - { - n_hdr++; - if (n_hdr >= file->n_headers) - break; - - blknum = headers[n_hdr].block; - page_lsn = headers[n_hdr].lsn; - page_crc = headers[n_hdr].checksum; - /* calculate payload size by comparing current and next page positions, - * page header is not included */ - compressed_size = headers[n_hdr+1].pos - headers[n_hdr].pos - sizeof(BackupPageHeader); - - 
Assert(compressed_size > 0); - Assert(compressed_size <= BLCKSZ); - - read_len = compressed_size + sizeof(BackupPageHeader); - } - else - { - /* We get into this function either when restoring old backup - * or when merging something. Align read_len only when restoring - * or merging old backups. - */ - if (get_page_header(in, from_fullpath, &(page).bph, NULL, backup_version)) - { - cur_pos_in += sizeof(BackupPageHeader); - - /* backward compatibility kludge TODO: remove in 3.0 */ - blknum = page.bph.block; - compressed_size = page.bph.compressed_size; - - /* this has a potential to backfire when retrying merge of old backups, - * so we just forbid the retrying of failed merges between versions >= 2.4.0 and - * version < 2.4.0 - */ - if (backup_version >= 20400) - read_len = compressed_size; - else - /* For some unknown and possibly dump reason I/O operations - * in versions < 2.4.0 were always aligned to 8 bytes. - * Now we have to deal with backward compatibility. - */ - read_len = MAXALIGN(compressed_size); - } - else - break; - } - /* * Backward compatibility kludge: in the good old days * n_blocks attribute was available only in DELTA backups. @@ -818,37 +916,31 @@ restore_data_file_internal(FILE *in, pioDBWriter_i out, pgFile *file, uint32 bac * is not happening in the first place. * TODO: remove in 3.0.0 */ - if (compressed_size == PageIsTruncated) + if (iter.truncated) { /* * Block header contains information that this block was truncated. * We need to truncate file to this length. 
*/ - elog(VERBOSE, "Truncate file \"%s\" to block %u", to_fullpath, blknum); + elog(VERBOSE, "Truncate file \"%s\" to block %u", to_fullpath, iter.blknum); - err = $i(pioTruncate, out, (uint64_t)blknum * BLCKSZ); + err = $i(pioTruncate, out, (uint64_t)iter.blknum * BLCKSZ); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), ""); break; } - Assert(compressed_size > 0); - Assert(compressed_size <= BLCKSZ); - /* no point in writing redundant data */ - if (nblocks > 0 && blknum >= nblocks) + if (nblocks > 0 && iter.blknum >= nblocks) break; - if (compressed_size > BLCKSZ) - elog(ERROR, "Size of a blknum %i exceed BLCKSZ: %i", blknum, compressed_size); - /* Incremental restore in LSN mode */ - if (map && lsn_map && datapagemap_is_set(lsn_map, blknum)) - datapagemap_add(map, blknum); + if (map && lsn_map && datapagemap_is_set(lsn_map, iter.blknum)) + datapagemap_add(map, iter.blknum); - if (map && checksum_map && checksum_map[blknum].checksum != 0) + if (map && checksum_map && checksum_map[iter.blknum].checksum != 0) { //elog(INFO, "HDR CRC: %u, MAP CRC: %u", page_crc, checksum_map[blknum].checksum); /* @@ -856,73 +948,37 @@ restore_data_file_internal(FILE *in, pioDBWriter_i out, pgFile *file, uint32 bac * If page in backup has the same checksum and lsn as * page in backup, then page can be skipped. */ - if (page_crc == checksum_map[blknum].checksum && - page_lsn == checksum_map[blknum].lsn) + if (iter.page_crc == checksum_map[iter.blknum].checksum && + iter.page_lsn == checksum_map[iter.blknum].lsn) { - datapagemap_add(map, blknum); + datapagemap_add(map, iter.blknum); } } /* if this page is marked as already restored, then skip it */ - if (map && datapagemap_is_set(map, blknum)) + if (map && datapagemap_is_set(map, iter.blknum)) { - /* Backward compatibility kludge TODO: remove in 3.0 - * go to the next page. 
- */ - if (!headers && fseek(in, read_len, SEEK_CUR) != 0) - elog(ERROR, "Cannot seek block %u of \"%s\": %s", - blknum, from_fullpath, strerror(errno)); + backup_page_skip(&iter); continue; } - if (headers && - cur_pos_in != headers[n_hdr].pos) - { - if (fseek(in, headers[n_hdr].pos, SEEK_SET) != 0) - elog(ERROR, "Cannot seek to offset %u of \"%s\": %s", - headers[n_hdr].pos, from_fullpath, strerror(errno)); - - cur_pos_in = headers[n_hdr].pos; - } - - /* read a page from file */ - if (headers) - len = fread(&page, 1, read_len, in); - else - len = fread(page.data, 1, read_len, in); - - if (len != read_len) - elog(ERROR, "Cannot read block %u file \"%s\": %s", - blknum, from_fullpath, strerror(errno)); - - cur_pos_in += read_len; - - /* - * Compression skip magic part 2: - * if page size is smaller than BLCKSZ, decompress the page. - * BUGFIX for versions < 2.0.23: if page size is equal to BLCKSZ. - * we have to check, whether it is compressed or not using - * page_may_be_compressed() function. - */ - if (compressed_size != BLCKSZ - || page_may_be_compressed(page.data, file->compress_alg, backup_version)) - { - is_compressed = true; - } + err = backup_page_read(&iter); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), ""); /* * Seek and write the restored page. * When restoring file from FULL backup, pages are written sequentially, * so there is no need to issue fseek for every page. */ - write_pos = blknum * BLCKSZ; + write_pos = iter.blknum * BLCKSZ; if (cur_pos_out != write_pos) { err = $i(pioSeek, out, write_pos); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Cannot seek block %u", - blknum); + iter.blknum); cur_pos_out = write_pos; } @@ -931,21 +987,21 @@ restore_data_file_internal(FILE *in, pioDBWriter_i out, pgFile *file, uint32 bac * If page is compressed and restore is in remote mode, * send compressed page to the remote side. 
*/ - if (is_compressed) - err = $i(pioWriteCompressed, out, ft_bytes(page.data, compressed_size), + if (iter.is_compressed) + err = $i(pioWriteCompressed, out, iter.compressed, .compress_alg = file->compress_alg); else - err = $i(pioWrite, out, ft_bytes(page.data, BLCKSZ)); + err = $i(pioWrite, out, iter.compressed); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Cannot write block %u", - blknum); + iter.blknum); write_len += BLCKSZ; cur_pos_out += BLCKSZ; /* update current write position */ /* Mark page as restored to avoid reading this page when restoring parent backups */ if (map) - datapagemap_add(map, blknum); + datapagemap_add(map, iter.blknum); } elog(LOG, "Copied file \"%s\": %zu bytes", from_fullpath, write_len); @@ -1368,27 +1424,34 @@ bool validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, uint32 checksum_version, uint32 backup_version, HeaderMap *hdr_map) { - size_t read_len = 0; + FOBJ_FUNC_ARP(); bool is_valid = true; - FILE *in; pg_crc32 crc; BackupPageHeader2 *headers = NULL; - int n_hdr = -1; - off_t cur_pos_in = 0; + pioDrive_i drive; + err_i err; + + backup_page_iterator iter = { + .fullpath = fullpath, + .n_headers = file->n_headers, + .backup_version = backup_version, + .compress_alg = file->compress_alg, + }; elog(LOG, "Validate relation blocks for file \"%s\"", fullpath); /* should not be possible */ Assert(!(backup_version >= 20400 && file->n_headers <= 0)); - in = fopen(fullpath, PG_BINARY_R); - if (in == NULL) - elog(ERROR, "Cannot open file \"%s\": %s", - fullpath, strerror(errno)); + drive = pioDriveForLocation(FIO_BACKUP_HOST); + + iter.in = $i(pioOpenRead, drive, fullpath, .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), ""); - headers = get_data_file_headers(hdr_map, file, backup_version, false); + iter.headers = get_data_file_headers(hdr_map, file, backup_version, false); - if (!headers && file->n_headers > 0) + if (!iter.headers && file->n_headers > 0) { elog(WARNING, "Cannot get 
page headers for file \"%s\"", fullpath); return false; @@ -1398,155 +1461,93 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, INIT_CRC32_COMPAT(backup_version, crc); /* read and validate pages one by one */ - while (true) + while (backup_page_next(&iter)) { int rc = 0; - size_t len = 0; - DataPage compressed_page; /* used as read buffer */ - int compressed_size = 0; DataPage page; - BlockNumber blknum = 0; + ft_bytes_t uncompressed; PageState page_st; if (interrupted || thread_interrupted) elog(ERROR, "Interrupted during data file validation"); - /* newer backups (post 2.4.0) have page headers in separate storage */ - if (headers) - { - n_hdr++; - if (n_hdr >= file->n_headers) - break; - - blknum = headers[n_hdr].block; - /* calculate payload size by comparing current and next page positions, - * page header is not included. - */ - compressed_size = headers[n_hdr+1].pos - headers[n_hdr].pos - sizeof(BackupPageHeader); - - Assert(compressed_size > 0); - Assert(compressed_size <= BLCKSZ); - - read_len = sizeof(BackupPageHeader) + compressed_size; - - if (cur_pos_in != headers[n_hdr].pos) - { - if (fio_fseek(in, headers[n_hdr].pos) < 0) - elog(ERROR, "Cannot seek block %u of \"%s\": %s", - blknum, fullpath, strerror(errno)); - else - elog(VERBOSE, "Seek to %u", headers[n_hdr].pos); - - cur_pos_in = headers[n_hdr].pos; - } - } - /* old backups (pre 2.4.0) rely on header located directly in data file */ - else - { - if (get_page_header(in, fullpath, &(compressed_page).bph, &crc, backup_version)) - { - /* Backward compatibility kludge, TODO: remove in 3.0 - * for some reason we padded compressed pages in old versions - */ - blknum = compressed_page.bph.block; - compressed_size = compressed_page.bph.compressed_size; - read_len = MAXALIGN(compressed_size); - } - else - break; - } - /* backward compatibility kludge TODO: remove in 3.0 */ - if (compressed_size == PageIsTruncated) + if (iter.truncated) { elog(VERBOSE, "Block %u of \"%s\" is 
truncated", - blknum, fullpath); + iter.blknum, fullpath); continue; } - Assert(compressed_size <= BLCKSZ); - Assert(compressed_size > 0); - - if (headers) - len = fread(&compressed_page, 1, read_len, in); - else - len = fread(compressed_page.data, 1, read_len, in); + ft_assert(iter.read_pos == iter.cur_pos); - if (len != read_len) + err = backup_page_read(&iter); + if ($haserr(err)) { - elog(WARNING, "Cannot read block %u file \"%s\": %s", - blknum, fullpath, strerror(errno)); + ft_logerr(FT_WARNING, $errmsg(err), ""); return false; } - /* update current position */ - cur_pos_in += read_len; - - if (headers) - COMP_CRC32_COMPAT(backup_version, crc, &compressed_page, read_len); - else - COMP_CRC32_COMPAT(backup_version, crc, compressed_page.data, read_len); + COMP_CRC32_COMPAT(backup_version, crc, iter.whole_read.ptr, iter.whole_read.len); - if (compressed_size != BLCKSZ - || page_may_be_compressed(compressed_page.data, file->compress_alg, - backup_version)) + if (iter.is_compressed) { int32 uncompressed_size = 0; const char *errormsg = NULL; uncompressed_size = do_decompress(page.data, BLCKSZ, - compressed_page.data, - compressed_size, + iter.compressed.ptr, + iter.compressed.len, file->compress_alg, &errormsg); if (uncompressed_size < 0 && errormsg != NULL) { elog(WARNING, "An error occured during decompressing block %u of file \"%s\": %s", - blknum, fullpath, errormsg); + iter.blknum, fullpath, errormsg); return false; } if (uncompressed_size != BLCKSZ) { - if (compressed_size == BLCKSZ) + elog(WARNING, "Page %u of file \"%s\" uncompressed to %d bytes. != BLCKSZ", + iter.blknum, fullpath, uncompressed_size); + if (iter.compressed.len == BLCKSZ) { is_valid = false; continue; } - elog(WARNING, "Page %u of file \"%s\" uncompressed to %d bytes. 
!= BLCKSZ", - blknum, fullpath, uncompressed_size); return false; } - rc = validate_one_page(page.data, - file->segno * RELSEG_SIZE + blknum, - stop_lsn, &page_st, checksum_version); + uncompressed = ft_bytes(page.data, BLCKSZ); } else - rc = validate_one_page(compressed_page.data, - file->segno * RELSEG_SIZE + blknum, - stop_lsn, &page_st, checksum_version); + uncompressed = iter.compressed; + + rc = validate_one_page(uncompressed.ptr, + file->segno * RELSEG_SIZE + iter.blknum, + stop_lsn, &page_st, checksum_version); switch (rc) { case PAGE_IS_NOT_FOUND: - elog(VERBOSE, "File \"%s\", block %u, page is NULL", file->rel_path, blknum); + elog(VERBOSE, "File \"%s\", block %u, page is NULL", file->rel_path, iter.blknum); break; case PAGE_IS_ZEROED: - elog(VERBOSE, "File: %s blknum %u, empty zeroed page", file->rel_path, blknum); + elog(VERBOSE, "File: %s blknum %u, empty zeroed page", file->rel_path, iter.blknum); break; case PAGE_HEADER_IS_INVALID: - elog(WARNING, "Page header is looking insane: %s, block %i", file->rel_path, blknum); + elog(WARNING, "Page header is looking insane: %s, block %i", file->rel_path, iter.blknum); is_valid = false; break; case PAGE_CHECKSUM_MISMATCH: - elog(WARNING, "File: %s blknum %u have wrong checksum: %u", file->rel_path, blknum, page_st.checksum); + elog(WARNING, "File: %s blknum %u have wrong checksum: %u", file->rel_path, iter.blknum, page_st.checksum); is_valid = false; break; case PAGE_LSN_FROM_FUTURE: elog(WARNING, "File: %s, block %u, checksum is %s. " "Page is from future: pageLSN %X/%X stopLSN %X/%X", - file->rel_path, blknum, + file->rel_path, iter.blknum, checksum_version ? 
"correct" : "not enabled", (uint32) (page_st.lsn >> 32), (uint32) page_st.lsn, (uint32) (stop_lsn >> 32), (uint32) stop_lsn); @@ -1555,7 +1556,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, } FIN_CRC32_COMPAT(backup_version, crc); - fclose(in); + $i(pioClose, iter.in); if (crc != file->crc) { @@ -1707,45 +1708,6 @@ get_lsn_map(const char *fullpath, uint32 checksum_version, return lsn_map; } -/* Every page in data file contains BackupPageHeader, extract it */ -bool -get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph, - pg_crc32 *crc, uint32 backup_version) -{ - /* read BackupPageHeader */ - size_t read_len = fread(bph, 1, sizeof(BackupPageHeader), in); - - if (ferror(in)) - elog(ERROR, "Cannot read file \"%s\": %s", - fullpath, strerror(errno)); - - if (read_len != sizeof(BackupPageHeader)) - { - if (read_len == 0 && feof(in)) - return false; /* EOF found */ - else if (read_len != 0 && feof(in)) - elog(ERROR, - "Odd size page found at offset %lld of \"%s\"", - (long long)ftello(in), fullpath); - else - elog(ERROR, "Cannot read header at offset %lld of \"%s\": %s", - (long long)ftello(in), fullpath, strerror(errno)); - } - - /* In older versions < 2.4.0, when crc for file was calculated, header was - * not included in crc calculations. Now it is. 
And now we have - * the problem of backward compatibility for backups of old versions - */ - if (crc) - COMP_CRC32_COMPAT(backup_version, *crc, bph, read_len); - - if (bph->block == 0 && bph->compressed_size == 0) - elog(ERROR, "Empty block in file \"%s\"", fullpath); - - Assert(bph->compressed_size != 0); - return true; -} - /* Open local backup file for writing, set permissions and buffering */ FILE* open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 buf_size) From fdda848511c0a972a7e27d1680618d63a57c4ff8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 06:59:56 +0300 Subject: [PATCH 188/339] [PBCKP-404] use pioCopy instead of restore_non_data_file_internal --- src/data.c | 70 +++++++++--------------------------------------------- 1 file changed, 11 insertions(+), 59 deletions(-) diff --git a/src/data.c b/src/data.c index 3093b3149..78edbaa80 100644 --- a/src/data.c +++ b/src/data.c @@ -70,8 +70,6 @@ static size_t restore_data_file_internal(pioReader_i in, pioDBWriter_i out, pgFi const char *from_fullpath, const char *to_fullpath, int nblocks, datapagemap_t *map, PageState *checksum_map, int checksum_version, datapagemap_t *lsn_map, BackupPageHeader2 *headers); -static void restore_non_data_file_internal(FILE *in, pioDBWriter_i out, pgFile *file, - const char *from_fullpath, const char *to_fullpath); #ifdef HAVE_LIBZ /* Implementation of zlib compression method */ @@ -1008,50 +1006,6 @@ restore_data_file_internal(pioReader_i in, pioDBWriter_i out, pgFile *file, uint return write_len; } -/* - * Copy file to backup. - * We do not apply compression to these files, because - * it is either small control file or already compressed cfs file. 
- */ -void -restore_non_data_file_internal(FILE *in, pioDBWriter_i out, pgFile *file, - const char *from_fullpath, const char *to_fullpath) -{ - err_i err; - size_t read_len = 0; - char *buf = pgut_malloc(STDIO_BUFSIZE); /* 64kB buffer */ - - /* copy content */ - for (;;) - { - read_len = 0; - - /* check for interrupt */ - if (interrupted || thread_interrupted) - elog(ERROR, "Interrupted during non-data file restore"); - - read_len = fread(buf, 1, STDIO_BUFSIZE, in); - - if (ferror(in)) - elog(ERROR, "Cannot read backup file \"%s\": %s", - from_fullpath, strerror(errno)); - - if (read_len > 0) - { - err = $i(pioWrite, out, ft_bytes(buf, read_len)); - if ($haserr(err)) - ft_logerr(FT_FATAL, $errmsg(err), ""); - } - - if (feof(in)) - break; - } - - pg_free(buf); - - elog(LOG, "Copied file \"%s\": %llu bytes", from_fullpath, (long long)file->write_size); -} - size_t restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, pgFile *dest_file, pioDBWriter_i out, const char *to_fullpath, @@ -1059,7 +1013,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, { char from_root[MAXPGPATH]; char from_fullpath[MAXPGPATH]; - FILE *in = NULL; + pioReadStream_i in; err_i err; pgFile *tmp_file = NULL; @@ -1171,20 +1125,18 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, join_path_components(from_fullpath, from_root, dest_file->rel_path); - in = fopen(from_fullpath, PG_BINARY_R); - if (in == NULL) - elog(ERROR, "Cannot open backup file \"%s\": %s", from_fullpath, - strerror(errno)); - - /* disable stdio buffering for non-data files */ - setvbuf(in, NULL, _IONBF, BUFSIZ); + in = $i(pioOpenReadStream, dest_backup->backup_location, from_fullpath, + .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Open backup file"); - /* do actual work */ - restore_non_data_file_internal(in, out, tmp_file, from_fullpath, to_fullpath); + err = pioCopy($reduce(pioWriteFlush, out), $reduce(pioRead, in)); + if ($haserr(err)) + 
ft_logerr(FT_FATAL, $errmsg(err), "Copying backup file"); - if (fclose(in) != 0) - elog(ERROR, "Cannot close file \"%s\": %s", from_fullpath, - strerror(errno)); + $i(pioClose, in); + elog(LOG, "Copied file \"%s\": %llu bytes", from_fullpath, + (long long)dest_file->write_size); return tmp_file->write_size; } From d3ad1f43188b0cf0e5293c9b771d0fae688e65c9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 07:33:21 +0300 Subject: [PATCH 189/339] remove a lot of unused fio functions --- src/archive.c | 8 +- src/utils/file.c | 420 ----------------------------------------------- src/utils/file.h | 26 --- 3 files changed, 5 insertions(+), 449 deletions(-) diff --git a/src/archive.c b/src/archive.c index 548418da8..6dc78af19 100644 --- a/src/archive.c +++ b/src/archive.c @@ -324,8 +324,10 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, bool no_ready_rename, bool is_compress, int compress_level) { + FOBJ_FUNC_ARP(); bool skipped = false; err_i err; + pioDBDrive_i drive = pioDBDriveForLocation(FIO_DB_HOST); elog(LOG, "pushing file \"%s\"", xlogfile->name); @@ -355,9 +357,9 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, elog(LOG, "Rename \"%s\" to \"%s\"", wal_file_ready, wal_file_done); /* do not error out, if rename failed */ - if (fio_rename(FIO_DB_HOST, wal_file_ready, wal_file_done) < 0) - elog(WARNING, "Cannot rename ready file \"%s\" to \"%s\": %s", - wal_file_ready, wal_file_done, strerror(errno)); + err = $i(pioRename, drive, wal_file_ready, wal_file_done); + if ($haserr(err)) + ft_logerr(FT_WARNING, $errmsg(err), "Renaming ready file"); } return skipped; diff --git a/src/utils/file.c b/src/utils/file.c index 07d2cd7d8..4e59fa74b 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -587,71 +587,6 @@ fio_disconnect(void) } } -/* Open stdio file */ -FILE* -fio_fopen(fio_location location, const char* path, const char* mode) -{ - FILE *f = NULL; - - if (fio_is_remote(location)) - { - int flags = 0; - int fd; - if 
(strcmp(mode, PG_BINARY_W) == 0) { - flags = O_TRUNC|PG_BINARY|O_RDWR|O_CREAT; - } else if (strcmp(mode, "w") == 0) { - flags = O_TRUNC|O_RDWR|O_CREAT; - } else if (strcmp(mode, PG_BINARY_R) == 0) { - flags = O_RDONLY|PG_BINARY; - } else if (strcmp(mode, "r") == 0) { - flags = O_RDONLY; - } else if (strcmp(mode, PG_BINARY_R "+") == 0) { - /* stdio fopen("rb+") actually doesn't create unexisted file, but probackup frequently - * needs to open existed file or create new one if not exists. - * In stdio it can be done using two fopen calls: fopen("r+") and if failed then fopen("w"). - * But to eliminate extra call which especially critical in case of remote connection - * we change r+ semantic to create file if not exists. - */ - flags = O_RDWR|O_CREAT|PG_BINARY; - } else if (strcmp(mode, "r+") == 0) { /* see comment above */ - flags |= O_RDWR|O_CREAT; - } else if (strcmp(mode, "a") == 0) { - flags |= O_CREAT|O_RDWR|O_APPEND; - } else { - Assert(false); - } - fd = fio_open(location, path, flags); - if (fd >= 0) - f = (FILE*)(size_t)((fd + 1) & ~FIO_PIPE_MARKER); - } - else - { - f = fopen(path, mode); - if (f == NULL && strcmp(mode, PG_BINARY_R "+") == 0) - f = fopen(path, PG_BINARY_W); - } - return f; -} - -/* Flush stream data (does nothing for remote file) */ -int -fio_fflush(FILE* f) -{ - int rc = 0; - if (!fio_is_remote_file(f)) - rc = fflush(f); - return rc; -} - -/* Close output stream */ -int -fio_fclose(FILE* f) -{ - return fio_is_remote_file(f) - ? fio_close(fio_fileno(f)) - : fclose(f); -} - /* Close file */ int fio_close(int fd) @@ -704,49 +639,6 @@ fio_close_impl(int fd, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } -/* Truncate stdio file */ -int -fio_ftruncate(FILE* f, off_t size) -{ - return fio_is_remote_file(f) - ? 
fio_truncate(fio_fileno(f), size) - : ftruncate(fileno(f), size); -} - -/* Truncate file - * TODO: make it synchronous - */ -int -fio_truncate(int fd, off_t size) -{ - if (fio_is_remote_fd(fd)) - { - fio_header hdr = { - .cop = FIO_TRUNCATE, - .handle = fd & ~FIO_PIPE_MARKER, - .size = 0, - .arg = size, - }; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - - return 0; - } - else - { - return ftruncate(fd, size); - } -} - -/* Set position in stdio file */ -int -fio_fseek(FILE* f, off_t offs) -{ - return fio_is_remote_file(f) - ? fio_seek(fio_fileno(f), offs) - : fseek(f, offs, SEEK_SET); -} - /* Set position in file */ /* TODO: make it synchronous or check async error */ int @@ -840,190 +732,6 @@ fio_write_impl(int fd, void const* buf, size_t size, int out) return; } -size_t -fio_fwrite_async(FILE* f, void const* buf, size_t size) -{ - return fio_is_remote_file(f) - ? fio_write_async(fio_fileno(f), buf, size) - : fwrite(buf, 1, size, f); -} - -/* Write data to the file */ -/* TODO: support async report error */ -ssize_t -fio_write_async(int fd, void const* buf, size_t size) -{ - if (size == 0) - return 0; - - if (fio_is_remote_fd(fd)) - { - fio_header hdr = { - .cop = FIO_WRITE_ASYNC, - .handle = fd & ~FIO_PIPE_MARKER, - .size = size, - .arg = 0, - }; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, buf, size), size); - return size; - } - else - return durable_write(fd, buf, size); -} - -static void -fio_write_async_impl(int fd, void const* buf, size_t size, int out) -{ - /* Quick exit if agent is tainted */ - if (async_errormsg) - return; - - if (durable_write(fd, buf, size) <= 0) - { - async_errormsg = pgut_malloc(ERRMSG_MAX_LEN); - snprintf(async_errormsg, ERRMSG_MAX_LEN, "%s", strerror(errno)); - } -} - -static int32 -fio_decompress(void* dst, void const* src, size_t size, int compress_alg, char **errormsg) -{ - const char *internal_errormsg = NULL; - int32 uncompressed_size = 
do_decompress(dst, BLCKSZ, - src, - size, - compress_alg, &internal_errormsg); - - if (uncompressed_size < 0 && internal_errormsg != NULL) - { - *errormsg = pgut_malloc(ERRMSG_MAX_LEN); - snprintf(*errormsg, ERRMSG_MAX_LEN, "An error occured during decompressing block: %s", internal_errormsg); - return -1; - } - - if (uncompressed_size != BLCKSZ) - { - *errormsg = pgut_malloc(ERRMSG_MAX_LEN); - snprintf(*errormsg, ERRMSG_MAX_LEN, "Page uncompressed to %d bytes != BLCKSZ", uncompressed_size); - return -1; - } - return uncompressed_size; -} - -/* Write data to the file */ -ssize_t -fio_fwrite_async_compressed(FILE* f, void const* buf, size_t size, int compress_alg) -{ - if (fio_is_remote_file(f)) - { - fio_header hdr; - - hdr.cop = FIO_WRITE_COMPRESSED_ASYNC; - hdr.handle = fio_fileno(f) & ~FIO_PIPE_MARKER; - hdr.size = size; - hdr.arg = compress_alg; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, buf, size), size); - - return size; - } - else - { - char *errormsg = NULL; - char decompressed_buf[BLCKSZ]; - int32 decompressed_size = fio_decompress(decompressed_buf, buf, size, compress_alg, &errormsg); - - if (decompressed_size < 0) - elog(ERROR, "%s", errormsg); - - return fwrite(decompressed_buf, 1, decompressed_size, f); - } -} - -static void -fio_write_compressed_impl(int fd, void const* buf, size_t size, int compress_alg) -{ - int32 decompressed_size; - char decompressed_buf[BLCKSZ]; - - /* If the previous command already have failed, - * then there is no point in bashing a head against the wall - */ - if (async_errormsg) - return; - - /* decompress chunk */ - decompressed_size = fio_decompress(decompressed_buf, buf, size, compress_alg, &async_errormsg); - - if (decompressed_size < 0) - return; - - if (durable_write(fd, decompressed_buf, decompressed_size) <= 0) - { - async_errormsg = pgut_malloc(ERRMSG_MAX_LEN); - snprintf(async_errormsg, ERRMSG_MAX_LEN, "%s", strerror(errno)); - } -} - -/* check if 
remote agent encountered any error during execution of async operations */ -int -fio_check_error_file(FILE* f, char **errmsg) -{ - if (fio_is_remote_file(f)) - { - fio_header hdr; - - hdr.cop = FIO_GET_ASYNC_ERROR; - hdr.size = 0; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - - /* check results */ - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (hdr.size > 0) - { - *errmsg = pgut_malloc(ERRMSG_MAX_LEN); - IO_CHECK(fio_read_all(fio_stdin, *errmsg, hdr.size), hdr.size); - return 1; - } - } - - return 0; -} - -static void -fio_get_async_error_impl(int out) -{ - fio_header hdr; - hdr.cop = FIO_GET_ASYNC_ERROR; - - /* send error message */ - if (async_errormsg) - { - hdr.size = strlen(async_errormsg) + 1; - - /* send header */ - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - - /* send message itself */ - IO_CHECK(fio_write_all(out, async_errormsg, hdr.size), hdr.size); - - //TODO: should we reset the tainted state ? -// pg_free(async_errormsg); -// async_errormsg = NULL; - } - else - { - hdr.size = 0; - /* send header */ - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - } -} - /* Read data from file */ ssize_t fio_read(int fd, void* buf, size_t size) @@ -1149,41 +857,6 @@ fio_symlink_impl(const char* target, const char* link_path, bool overwrite, int IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } -/* Rename file */ -int -fio_rename(fio_location location, const char* old_path, const char* new_path) -{ - if (fio_is_remote(location)) - { - size_t old_path_len = strlen(old_path) + 1; - size_t new_path_len = strlen(new_path) + 1; - fio_header hdr = { - .cop = FIO_RENAME, - .handle = -1, - .size = old_path_len + new_path_len, - .arg = 0, - }; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, old_path, old_path_len), old_path_len); - IO_CHECK(fio_write_all(fio_stdout, new_path, new_path_len), new_path_len); - - 
IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - Assert(hdr.cop == FIO_RENAME); - - if (hdr.arg != 0) - { - errno = hdr.arg; - return -1; - } - return 0; - } - else - { - return rename(old_path, new_path); - } -} - static void fio_rename_impl(char const* old_path, const char* new_path, int out) { @@ -1415,30 +1088,6 @@ fio_mkdir_impl(const char* path, int mode, bool strict, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } -/* Change file mode */ -int -fio_chmod(fio_location location, const char* path, int mode) -{ - if (fio_is_remote(location)) - { - fio_header hdr; - size_t path_len = strlen(path) + 1; - hdr.cop = FIO_CHMOD; - hdr.handle = -1; - hdr.size = path_len; - hdr.arg = mode; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, path, path_len), path_len); - - return 0; - } - else - { - return chmod(path, mode); - } -} - #ifdef HAVE_LIBZ #define ZLIB_BUFFER_SIZE (64*1024) @@ -1621,36 +1270,6 @@ fio_gzseek(gzFile f, z_off_t offset, int whence) #endif -/* Send file content - * Note: it should not be used for large files. 
- */ -static void -fio_load_file(int out, const char* path) -{ - int fd = open(path, O_RDONLY); - fio_header hdr; - void* buf = NULL; - - hdr.cop = FIO_SEND; - hdr.size = 0; - - if (fd >= 0) - { - off_t size = lseek(fd, 0, SEEK_END); - buf = pgut_malloc(size); - lseek(fd, 0, SEEK_SET); - IO_CHECK(fio_read_all(fd, buf, size), size); - hdr.size = size; - SYS_CHECK(close(fd)); - } - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - if (buf) - { - IO_CHECK(fio_write_all(out, buf, hdr.size), hdr.size); - free(buf); - } -} - static void fio_send_pio_err(int out, err_i err) { @@ -3077,9 +2696,6 @@ fio_communicate(int in, int out) } errno = 0; /* reset errno */ switch (hdr.cop) { - case FIO_LOAD: /* Send file content */ - fio_load_file(out, buf); - break; case FIO_OPENDIR: /* Open directory for traversal */ dir[hdr.handle] = opendir(buf); hdr.arg = dir[hdr.handle] == NULL ? errno : 0; @@ -3117,12 +2733,6 @@ fio_communicate(int in, int out) // IO_CHECK(fio_write_all(fd[hdr.handle], buf, hdr.size), hdr.size); fio_write_impl(fd[hdr.handle], buf, hdr.size, out); break; - case FIO_WRITE_ASYNC: /* Write to the current position in file */ - fio_write_async_impl(fd[hdr.handle], buf, hdr.size, out); - break; - case FIO_WRITE_COMPRESSED_ASYNC: /* Write to the current position in file */ - fio_write_compressed_impl(fd[hdr.handle], buf, hdr.size, hdr.arg); - break; case FIO_READ: /* Read from the current position in file */ if ((size_t)hdr.arg > buf_size) { buf_size = hdr.arg; @@ -3137,15 +2747,6 @@ fio_communicate(int in, int out) if (hdr.size != 0) IO_CHECK(fio_write_all(out, buf, hdr.size), hdr.size); break; - case FIO_PREAD: /* Read from specified position in file, ignoring pages beyond horizon of delta backup */ - rc = pread(fd[hdr.handle], buf, BLCKSZ, hdr.arg); - hdr.cop = FIO_SEND; - hdr.arg = rc; - hdr.size = rc >= 0 ? 
rc : 0; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - if (hdr.size != 0) - IO_CHECK(fio_write_all(out, buf, hdr.size), hdr.size); - break; case FIO_AGENT_VERSION: { size_t payload_size = prepare_compatibility_str(buf, buf_size); @@ -3211,11 +2812,6 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } break; - case FIO_ACCESS: /* Check presence of file with specified name */ - hdr.size = 0; - hdr.arg = access(buf, hdr.arg) < 0 ? errno : 0; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - break; case FIO_RENAME: /* Rename file */ /* possible buffer overflow */ fio_rename_impl(buf, buf + strlen(buf) + 1, out); @@ -3229,15 +2825,9 @@ fio_communicate(int in, int out) case FIO_MKDIR: /* Create directory */ fio_mkdir_impl(buf, hdr.arg, hdr.handle == 1, out); break; - case FIO_CHMOD: /* Change file mode */ - SYS_CHECK(chmod(buf, hdr.arg)); - break; case FIO_SEEK: /* Set current position in file */ fio_seek_impl(fd[hdr.handle], hdr.arg); break; - case FIO_TRUNCATE: /* Truncate file */ - SYS_CHECK(ftruncate(fd[hdr.handle], hdr.arg)); - break; case FIO_LIST_DIR: fio_list_dir_impl(out, buf, drive); break; @@ -3266,13 +2856,6 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); break; - case FIO_SYNC_FILE: - if (fsync(fd[hdr.handle]) == 0) - hdr.arg = 0; - else - hdr.arg = errno; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - break; case FIO_GET_CRC32: Assert((hdr.arg & GET_CRC32_TRUNCATED) == 0 || (hdr.arg & (GET_CRC32_TRUNCATED|GET_CRC32_DECOMPRESS)) == GET_CRC32_TRUNCATED); @@ -3299,9 +2882,6 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); free(buf); return; - case FIO_GET_ASYNC_ERROR: - fio_get_async_error_impl(out); - break; case FIO_READLINK: /* Read content of a symbolic link */ { /* diff --git a/src/utils/file.h b/src/utils/file.h index e5a4776c3..1d3ba0fa9 100644 --- 
a/src/utils/file.h +++ b/src/utils/file.h @@ -32,40 +32,28 @@ typedef enum FIO_SYMLINK, FIO_REMOVE, FIO_MKDIR, - FIO_CHMOD, FIO_SEEK, - FIO_TRUNCATE, - FIO_PREAD, FIO_READ, - FIO_LOAD, FIO_STAT, FIO_SEND, - FIO_ACCESS, FIO_OPENDIR, FIO_READDIR, FIO_CLOSEDIR, FIO_PAGE, - FIO_WRITE_COMPRESSED_ASYNC, FIO_GET_CRC32, /* used for incremental restore */ FIO_GET_CHECKSUM_MAP, FIO_GET_LSN_MAP, FIO_ERROR, FIO_SEND_FILE, -// FIO_CHUNK, FIO_SEND_FILE_EOF, - FIO_SEND_FILE_CORRUPTION, - FIO_SEND_FILE_HEADERS, /* messages for closing connection */ FIO_DISCONNECT, FIO_DISCONNECTED, FIO_LIST_DIR, FIO_REMOVE_DIR, FIO_CHECK_POSTMASTER, - FIO_GET_ASYNC_ERROR, - FIO_WRITE_ASYNC, FIO_READLINK, - FIO_SYNC_FILE, FIO_SEND_FILE_CONTENT, FIO_PAGE_ZERO, FIO_FILES_ARE_SAME, @@ -152,22 +140,10 @@ extern void fio_get_agent_version(int* protocol, char* payload_buf, size_t pa /* fd-style functions */ extern int fio_open(fio_location location, const char* name, int mode); -extern ssize_t fio_write_async(int fd, void const* buf, size_t size); extern ssize_t fio_read(int fd, void* buf, size_t size); extern int fio_seek(int fd, off_t offs); -extern int fio_truncate(int fd, off_t size); extern int fio_close(int fd); -/* FILE-style functions */ -extern FILE* fio_fopen(fio_location location, const char* name, const char* mode); -extern ssize_t fio_fwrite_async_compressed(FILE* f, void const* buf, size_t size, int compress_alg); -extern size_t fio_fwrite_async(FILE* f, void const* buf, size_t size); -extern int fio_check_error_file(FILE* f, char **errmsg); -extern int fio_fflush(FILE* f); -extern int fio_fseek(FILE* f, off_t offs); -extern int fio_ftruncate(FILE* f, off_t size); -extern int fio_fclose(FILE* f); - /* gzFile-style functions */ #ifdef HAVE_LIBZ extern gzFile fio_gzopen(fio_location location, const char* path, const char* mode, int level); @@ -191,10 +167,8 @@ extern pg_crc32 fio_get_crc32_truncated(fio_location location, const char *file_path, bool missing_ok); -extern int 
fio_rename(fio_location location, const char* old_path, const char* new_path); extern int fio_symlink(fio_location location, const char* target, const char* link_path, bool overwrite); extern int fio_remove(fio_location location, const char* path, bool missing_ok); -extern int fio_chmod(fio_location location, const char* path, int mode); extern ssize_t fio_readlink(fio_location location, const char *path, char *value, size_t valsiz); extern pid_t fio_check_postmaster(fio_location location, const char *pgdata); From fdab5a91499beb348f513acb8fe5be635c6a85d9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 09:05:59 +0300 Subject: [PATCH 190/339] prepare to replace fio in parsexlog.c: seekable decompressor Seekable decompressor is a cheat: read from underlying file starts from beginning if we seek backward. But it is best we can do as gzseek does the same. --- src/utils/file.c | 142 +++++++++++++++++++++++++++++++++++++++++++++++ src/utils/file.h | 11 ++++ 2 files changed, 153 insertions(+) diff --git a/src/utils/file.c b/src/utils/file.c index 4e59fa74b..9c063345e 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3193,12 +3193,29 @@ typedef struct pioGZDecompress { bool ignoreTruncate; } pioGZDecompress; +typedef struct pioGZDecompressWrapperObj { + bool ignoreTruncate; +} pioGZDecompressWrapperObj; + #define kls__pioGZCompress iface__pioFilter, mth(fobjDispose), iface(pioFilter) fobj_klass(pioGZCompress); #define kls__pioGZDecompress iface__pioFilter, mth(fobjDispose), iface(pioFilter) fobj_klass(pioGZDecompress); +#define kls__pioGZDecompressWrapperObj mth(pioWrapRead) +fobj_klass(pioGZDecompressWrapperObj); #endif +typedef struct pioReSeekableReader { + pioReader_i reader; + pioRead_i wrapped; + pioWrapRead_i wrapper; + int64_t pos; + bool closed; + bool had_err; +} pioReSeekableReader; +#define kls__pioReSeekableReader iface__pioReader, mth(fobjDispose) +fobj_klass(pioReSeekableReader); + /* CRC32 counter */ typedef struct pioDevNull { @@ 
-5379,8 +5396,131 @@ pioGZDecompress_fobjRepr(VSelf) Self(pioGZCompress); return $S("pioGZDecompress"); } + +pioWrapRead_i +pioGZDecompressWrapper(bool ignoreTruncate) +{ + return $bind(pioWrapRead, $alloc(pioGZDecompressWrapperObj, + .ignoreTruncate = ignoreTruncate)); +} + +static pioRead_i +pioGZDecompressWrapperObj_pioWrapRead(VSelf, pioRead_i rdr, err_i* err) +{ + Self(pioGZDecompressWrapperObj); + pioFilter_i flt; + fobj_reset_err(err); + + flt = pioGZDecompressFilter(self->ignoreTruncate); + return pioWrapReadFilter(rdr, flt, CHUNK_SIZE); +} #endif +extern pioReader_i +pioWrapForReSeek(pioReader_i fl, pioWrapRead_i wr) +{ + pioReSeekableReader* reseek; + err_i err; + + reseek = $alloc(pioReSeekableReader, + .reader = $iref(fl), + .wrapper = $iref(wr), + ); + reseek->wrapped = $iref($i(pioWrapRead, wr, + .reader=$reduce(pioRead, fl), + .err=&err)); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "wrap failed"); + + return $bind(pioReader, reseek); +} + +static size_t +pioReSeekableReader_pioRead(VSelf, ft_bytes_t buf, err_i *err) +{ + Self(pioReSeekableReader); + size_t r; + + ft_assert(!self->had_err, "use after error"); + ft_assert(!self->closed, "use after close"); + + r = $i(pioRead, self->wrapped, buf, err); + self->pos += r; + if ($haserr(*err)) + self->had_err = true; + return r; +} + +static err_i +pioReSeekableReader_pioSeek(VSelf, uint64_t pos) +{ + FOBJ_FUNC_ARP(); + Self(pioReSeekableReader); + char buf[4096]; + size_t need, r; + err_i err; + + ft_assert(!self->had_err, "use after error"); + ft_assert(!self->closed, "use after close"); + + if (pos < self->pos) + { + pioRead_i wrapped; + /* had to read from the beginning and reset filter */ + self->had_err = true; + err = $i(pioSeek, self->reader, 0); + if ($haserr(err)) + return $iresult(err); + self->pos = 0; + + wrapped = $i(pioWrapRead, self->wrapper, + .reader = $reduce(pioRead, self->reader), + .err = &err); + if ($haserr(err)) + return $iresult(err); + $iset(&self->wrapped, wrapped); 
+ self->had_err = false; + } + + while (pos > self->pos) + { + need = ft_min(pos - self->pos, sizeof(buf)); + r = $(pioRead, self, ft_bytes(buf, need), .err = &err); + if ($haserr(err)) + { + self->had_err = true; + return $iresult(err); + } + /* lseek/fseek seeks past the file end without error, so we do */ + if (r < need) + break; + } + + return $noerr(); +} + +static err_i +pioReSeekableReader_pioClose(VSelf, bool sync) +{ + Self(pioReSeekableReader); + err_i err; + + err = $i(pioClose, self->reader); + self->closed = true; + return err; +} + +static void +pioReSeekableReader_fobjDispose(VSelf) +{ + Self(pioReSeekableReader); + if (!self->closed) + $i(pioClose, self->reader); + $idel(&self->reader); + $idel(&self->wrapper); + $idel(&self->wrapped); +} + /* Transform filter method */ /* Must count crc32 of new portion of data. No output needed */ static pioFltTransformResult @@ -6047,10 +6187,12 @@ fobj_klass_handle(pioWriteFilter, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioReadFilter, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioDevNull); fobj_klass_handle(pioCRC32Counter); +fobj_klass_handle(pioReSeekableReader); #ifdef HAVE_LIBZ fobj_klass_handle(pioGZCompress, mth(fobjRepr)); fobj_klass_handle(pioGZDecompress, mth(fobjRepr)); +fobj_klass_handle(pioGZDecompressWrapperObj); #endif void diff --git a/src/utils/file.h b/src/utils/file.h index 1d3ba0fa9..68a990446 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -396,9 +396,20 @@ extern pioRead_i pioWrapReadFilter(pioRead_i fl, pioFilter_i flt, size_t buf_size); +#define mth__pioWrapRead pioRead_i, (pioRead_i, reader), (err_i*, err) +fobj_method(pioWrapRead); + +/* + * Usefull for seek-able GZip reader - ie imitate gzseek. + * Has same limitations: will read from start if seek-ed back. 
+ */ +extern pioReader_i pioWrapForReSeek(pioReader_i fl, + pioWrapRead_i wr); + #ifdef HAVE_LIBZ extern pioFilter_i pioGZCompressFilter(int level); extern pioFilter_i pioGZDecompressFilter(bool ignoreTruncate); +extern pioWrapRead_i pioGZDecompressWrapper(bool ignoreTruncate); #endif typedef struct pioCRC32Counter pioCRC32Counter; From d175255933d4dbaa6ceea4062399c3002a4bed9d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 09:51:25 +0300 Subject: [PATCH 191/339] use pioOpenRead and pioWrapForReSeek in SimpleXlogPageRead --- src/parsexlog.c | 136 ++++++++++++++++-------------------------------- 1 file changed, 46 insertions(+), 90 deletions(-) diff --git a/src/parsexlog.c b/src/parsexlog.c index d9d6d1cba..1a5edfbd8 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -108,13 +108,9 @@ typedef struct XLogReaderData bool need_switch; - int xlogfile; + pioDrive_i drive; + pioReader_i xlogfile; char xlogpath[MAXPGPATH]; - -#ifdef HAVE_LIBZ - gzFile gz_xlogfile; - char gz_xlogpath[MAXPGPATH]; -#endif } XLogReaderData; /* Function to process a WAL record */ @@ -924,24 +920,6 @@ get_prior_record_lsn(const char *archivedir, XLogRecPtr start_lsn, return res; } -#ifdef HAVE_LIBZ -/* - * Show error during work with compressed file - */ -static const char * -get_gz_error(gzFile gzf) -{ - int errnum; - const char *errmsg; - - errmsg = fio_gzerror(gzf, &errnum); - if (errnum == Z_ERRNO) - return strerror(errno); - else - return errmsg; -} -#endif - /* XLogreader callback function, to read a WAL page */ static int SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, @@ -953,6 +931,9 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, { XLogReaderData *reader_data; uint32 targetPageOff; + FOBJ_FUNC_ARP(); + err_i err; + size_t rd; reader_data = (XLogReaderData *) xlogreader->private_data; targetPageOff = targetPagePtr % wal_seg_size; @@ -1009,25 +990,26 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr 
targetPagePtr, if (!reader_data->xlogexists) { char xlogfname[MAXFNAMELEN]; + char gz_file[MAXPGPATH]; char partial_file[MAXPGPATH]; GetXLogFileName(xlogfname, reader_data->tli, reader_data->xlogsegno, wal_seg_size); join_path_components(reader_data->xlogpath, wal_archivedir, xlogfname); - snprintf(reader_data->gz_xlogpath, MAXPGPATH, "%s.gz", reader_data->xlogpath); /* We fall back to using .partial segment in case if we are running * multi-timeline incremental backup right after standby promotion. * TODO: it should be explicitly enabled. */ snprintf(partial_file, MAXPGPATH, "%s.partial", reader_data->xlogpath); + snprintf(gz_file, MAXPGPATH, "%s.gz", reader_data->xlogpath); /* If segment do not exists, but the same * segment with '.partial' suffix does, use it instead */ if (!fileExists(reader_data->xlogpath, FIO_LOCAL_HOST) && fileExists(partial_file, FIO_LOCAL_HOST)) { - snprintf(reader_data->xlogpath, MAXPGPATH, "%s", partial_file); + ft_strlcpy(reader_data->xlogpath, partial_file, MAXPGPATH); } if (fileExists(reader_data->xlogpath, FIO_LOCAL_HOST)) @@ -1036,34 +1018,35 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, reader_data->thread_num, reader_data->xlogpath); reader_data->xlogexists = true; - reader_data->xlogfile = fio_open(FIO_LOCAL_HOST, reader_data->xlogpath, - O_RDONLY | PG_BINARY); - - if (reader_data->xlogfile < 0) + reader_data->xlogfile = $iref($i(pioOpenRead, reader_data->drive, + .path = reader_data->xlogpath, .err = &err)); + if ($haserr(err)) { - elog(WARNING, "Thread [%d]: Could not open WAL segment \"%s\": %s", - reader_data->thread_num, reader_data->xlogpath, - strerror(errno)); + ft_logerr(FT_WARNING, $errmsg(err), "Thread [%d]: Open WAL segment"); return -1; } } #ifdef HAVE_LIBZ /* Try to open compressed WAL segment */ - else if (fileExists(reader_data->gz_xlogpath, FIO_LOCAL_HOST)) + else if (fileExists(gz_file, FIO_LOCAL_HOST)) { + pioReader_i reader; + ft_strlcpy(reader_data->xlogpath, gz_file, 
MAXPGPATH); + elog(LOG, "Thread [%d]: Opening compressed WAL segment \"%s\"", - reader_data->thread_num, reader_data->gz_xlogpath); + reader_data->thread_num, reader_data->xlogpath); reader_data->xlogexists = true; - reader_data->gz_xlogfile = fio_gzopen(FIO_LOCAL_HOST, reader_data->gz_xlogpath, - "rb", -1); - if (reader_data->gz_xlogfile == NULL) + reader = $i(pioOpenRead, reader_data->drive, + .path = reader_data->xlogpath, .err = &err); + if ($haserr(err)) { - elog(WARNING, "Thread [%d]: Could not open compressed WAL segment \"%s\": %s", - reader_data->thread_num, reader_data->gz_xlogpath, - strerror(errno)); + ft_logerr(FT_WARNING, $errmsg(err), + "Thread [%d]: Open compressed WAL segment"); return -1; } + reader = pioWrapForReSeek(reader, pioGZDecompressWrapper(false)); + reader_data->xlogfile = $iref(reader); } #endif /* Exit without error if WAL segment doesn't exist */ @@ -1090,42 +1073,27 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, } /* Read the requested page */ - if (reader_data->xlogfile != -1) + err = $i(pioSeek, reader_data->xlogfile, targetPageOff); + if ($haserr(err)) { - if (fio_seek(reader_data->xlogfile, (off_t) targetPageOff) < 0) - { - elog(WARNING, "Thread [%d]: Could not seek in WAL segment \"%s\": %s", - reader_data->thread_num, reader_data->xlogpath, strerror(errno)); - return -1; - } + ft_logerr(FT_WARNING, $errmsg(err), "Thread [%d]: Seek in WAL segment", + reader_data->thread_num); + return -1; + } - if (fio_read(reader_data->xlogfile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ) - { - elog(WARNING, "Thread [%d]: Could not read from WAL segment \"%s\": %s", - reader_data->thread_num, reader_data->xlogpath, strerror(errno)); - return -1; - } + rd = $i(pioRead, reader_data->xlogfile, ft_bytes(readBuf, XLOG_BLCKSZ), + .err = &err); + if ($noerr(err) && rd != XLOG_BLCKSZ) + { + err = $err(RT, "Short read from {path}: {size} < XLOG_BLCKSZ", + path(reader_data->xlogpath), size(rd)); } -#ifdef HAVE_LIBZ - else + if 
($haserr(err)) { - if (fio_gzseek(reader_data->gz_xlogfile, (z_off_t) targetPageOff, SEEK_SET) == -1) - { - elog(WARNING, "Thread [%d]: Could not seek in compressed WAL segment \"%s\": %s", - reader_data->thread_num, reader_data->gz_xlogpath, - get_gz_error(reader_data->gz_xlogfile)); - return -1; - } - - if (fio_gzread(reader_data->gz_xlogfile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ) - { - elog(WARNING, "Thread [%d]: Could not read from compressed WAL segment \"%s\": %s", - reader_data->thread_num, reader_data->gz_xlogpath, - get_gz_error(reader_data->gz_xlogfile)); - return -1; - } + ft_logerr(FT_WARNING, $errmsg(err), "Thread [%d]: Read from WAL segment", + reader_data->thread_num); + return -1; } -#endif memcpy(reader_data->page_buf, readBuf, XLOG_BLCKSZ); reader_data->prev_page_off = targetPageOff; @@ -1152,7 +1120,8 @@ InitXLogPageRead(XLogReaderData *reader_data, const char *archivedir, MemSet(reader_data, 0, sizeof(XLogReaderData)); reader_data->tli = tli; - reader_data->xlogfile = -1; + reader_data->drive = pioDriveForLocation(FIO_BACKUP_HOST); + $setNULL(&reader_data->xlogfile); if (allocate_reader) { @@ -1715,18 +1684,11 @@ CleanupXLogPageRead(XLogReaderState *xlogreader) XLogReaderData *reader_data; reader_data = (XLogReaderData *) xlogreader->private_data; - if (reader_data->xlogfile >= 0) + if (!$isNULL(reader_data->xlogfile)) { - fio_close(reader_data->xlogfile); - reader_data->xlogfile = -1; + $i(pioClose, reader_data->xlogfile); + $idel(&reader_data->xlogfile); } -#ifdef HAVE_LIBZ - else if (reader_data->gz_xlogfile != NULL) - { - fio_gzclose(reader_data->gz_xlogfile); - reader_data->gz_xlogfile = NULL; - } -#endif reader_data->prev_page_off = 0; reader_data->xlogexists = false; } @@ -1743,16 +1705,10 @@ PrintXLogCorruptionMsg(XLogReaderData *reader_data, int elevel) if (!reader_data->xlogexists) elog(elevel, "Thread [%d]: WAL segment \"%s\" is absent", reader_data->thread_num, reader_data->xlogpath); - else if (reader_data->xlogfile != -1) + else 
elog(elevel, "Thread [%d]: Possible WAL corruption. " "Error has occured during reading WAL segment \"%s\"", reader_data->thread_num, reader_data->xlogpath); -#ifdef HAVE_LIBZ - else if (reader_data->gz_xlogfile != NULL) - elog(elevel, "Thread [%d]: Possible WAL corruption. " - "Error has occured during reading WAL segment \"%s\"", - reader_data->thread_num, reader_data->gz_xlogpath); -#endif } else { From d231fc6be58b6e8bf321e3368592d58e46660ce4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 10:30:35 +0300 Subject: [PATCH 192/339] fix pioReadFilter_pioRead 1. without this change it attempt to another one pioRead on wrapped even if we already filled wbuf. 2. this lead to strange decompression error in SimpleXlogPageRead. I didn't investigate it properly, but this change fixes things. --- src/utils/file.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/utils/file.c b/src/utils/file.c index 9c063345e..fbcd1964f 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -4946,6 +4946,9 @@ pioReadFilter_pioRead(VSelf, ft_bytes_t wbuf, err_i *err) memmove(self->buffer, rbuf.ptr, rbuf.len); self->len = rbuf.len; + if (wbuf.len == 0) + break; + /* feed buffer */ rbuf = ft_bytes(self->buffer, self->capa); ft_bytes_consume(&rbuf, self->len); From 692f0c0e7b92a021349b4062e07c7f4dbfb58f55 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 11:01:42 +0300 Subject: [PATCH 193/339] SimpleXlogPageRead: replace fileExists with pioExists --- src/parsexlog.c | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/src/parsexlog.c b/src/parsexlog.c index 1a5edfbd8..9d399f3cf 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -992,6 +992,7 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, char xlogfname[MAXFNAMELEN]; char gz_file[MAXPGPATH]; char partial_file[MAXPGPATH]; + err_i err2 = $noerr(); GetXLogFileName(xlogfname, reader_data->tli, reader_data->xlogsegno, 
wal_seg_size); @@ -1006,13 +1007,19 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, /* If segment do not exists, but the same * segment with '.partial' suffix does, use it instead */ - if (!fileExists(reader_data->xlogpath, FIO_LOCAL_HOST) && - fileExists(partial_file, FIO_LOCAL_HOST)) + if (!$i(pioExists, reader_data->drive, reader_data->xlogpath, .err=&err) && + $i(pioExists, reader_data->drive, partial_file, .err=&err2)) { ft_strlcpy(reader_data->xlogpath, partial_file, MAXPGPATH); } + else if ($haserr(err) || $haserr(err2)) + { + ft_logerr(FT_WARNING, $errmsg(fobj_err_combine(err, err2)), + "Thread [%d]: Looking for WAL segment"); + return -1; + } - if (fileExists(reader_data->xlogpath, FIO_LOCAL_HOST)) + if ($i(pioExists, reader_data->drive, reader_data->xlogpath, .err=&err)) { elog(LOG, "Thread [%d]: Opening WAL segment \"%s\"", reader_data->thread_num, reader_data->xlogpath); @@ -1020,15 +1027,11 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, reader_data->xlogexists = true; reader_data->xlogfile = $iref($i(pioOpenRead, reader_data->drive, .path = reader_data->xlogpath, .err = &err)); - if ($haserr(err)) - { - ft_logerr(FT_WARNING, $errmsg(err), "Thread [%d]: Open WAL segment"); - return -1; - } } #ifdef HAVE_LIBZ /* Try to open compressed WAL segment */ - else if (fileExists(gz_file, FIO_LOCAL_HOST)) + else if ($noerr(err) && + $i(pioExists, reader_data->drive, gz_file, .err=&err)) { pioReader_i reader; ft_strlcpy(reader_data->xlogpath, gz_file, MAXPGPATH); @@ -1039,16 +1042,18 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, reader_data->xlogexists = true; reader = $i(pioOpenRead, reader_data->drive, .path = reader_data->xlogpath, .err = &err); - if ($haserr(err)) + if ($noerr(err)) { - ft_logerr(FT_WARNING, $errmsg(err), - "Thread [%d]: Open compressed WAL segment"); - return -1; + reader = pioWrapForReSeek(reader, pioGZDecompressWrapper(false)); + reader_data->xlogfile 
= $iref(reader); } - reader = pioWrapForReSeek(reader, pioGZDecompressWrapper(false)); - reader_data->xlogfile = $iref(reader); } #endif + if ($haserr(err)) + { + ft_logerr(FT_WARNING, $errmsg(err), "Thread [%d]: Open WAL segment"); + return -1; + } /* Exit without error if WAL segment doesn't exist */ if (!reader_data->xlogexists) return -1; From 8efe4d4bc7d8c8d1b6f6728251f976cafe31cc60 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 11:09:25 +0300 Subject: [PATCH 194/339] remove fio fd-style functions and gzFile-style functions as unused --- src/utils/file.c | 322 +---------------------------------------------- src/utils/file.h | 16 --- 2 files changed, 3 insertions(+), 335 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index fbcd1964f..a6dc8a2da 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -106,9 +106,6 @@ struct __attribute__((packed)) fio_req_open_write { bool exclusive; }; -/* Convert FIO pseudo handle to index in file descriptor array */ -#define fio_fileno(f) (((size_t)f - 1) | FIO_PIPE_MARKER) - #if defined(WIN32) #undef open #undef fopen @@ -186,13 +183,6 @@ fio_error(int rc, int size, const char* file, int line) } } -/* Check if file descriptor is local or remote (created by FIO) */ -static bool -fio_is_remote_fd(int fd) -{ - return (fd & FIO_PIPE_MARKER) != 0; -} - #ifdef WIN32 /* TODO: use real pread on Linux */ static ssize_t @@ -525,47 +515,6 @@ fio_closedir(DIR *dir) } } -/* Open file */ -int -fio_open(fio_location location, const char* path, int mode) -{ - int fd; - if (fio_is_remote(location)) - { - int handle; - fio_header hdr; - - handle = find_free_handle(); - hdr.cop = FIO_OPEN; - hdr.handle = handle; - hdr.size = strlen(path) + 1; - hdr.arg = mode; -// hdr.arg = mode & ~O_EXCL; -// elog(INFO, "PATH: %s MODE: %i, %i", path, mode, O_EXCL); -// elog(INFO, "MODE: %i", hdr.arg); - set_handle(handle); - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - 
IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); - - /* check results */ - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (hdr.arg != 0) - { - errno = hdr.arg; - unset_handle(hdr.handle); - return -1; - } - fd = handle | FIO_PIPE_MARKER; - } - else - { - fd = open(path, mode, FILE_PERMISSION); - } - return fd; -} - /* Close ssh session */ void @@ -587,40 +536,6 @@ fio_disconnect(void) } } -/* Close file */ -int -fio_close(int fd) -{ - if (fio_is_remote_fd(fd)) - { - fio_header hdr = { - .cop = FIO_CLOSE, - .handle = fd & ~FIO_PIPE_MARKER, - .size = 0, - .arg = 0, - }; - - unset_handle(hdr.handle); - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - - /* Wait for response */ - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - Assert(hdr.cop == FIO_CLOSE); - - if (hdr.arg != 0) - { - errno = hdr.arg; - return -1; - } - - return 0; - } - else - { - return close(fd); - } -} - /* Close remote file implementation */ static void fio_close_impl(int fd, int out) @@ -639,30 +554,6 @@ fio_close_impl(int fd, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } -/* Set position in file */ -/* TODO: make it synchronous or check async error */ -int -fio_seek(int fd, off_t offs) -{ - if (fio_is_remote_fd(fd)) - { - fio_header hdr; - - hdr.cop = FIO_SEEK; - hdr.handle = fd & ~FIO_PIPE_MARKER; - hdr.size = 0; - hdr.arg = offs; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - - return 0; - } - else - { - return lseek(fd, offs, SEEK_SET); - } -} - /* seek is asynchronous */ static void fio_seek_impl(int fd, off_t offs) @@ -732,34 +623,6 @@ fio_write_impl(int fd, void const* buf, size_t size, int out) return; } -/* Read data from file */ -ssize_t -fio_read(int fd, void* buf, size_t size) -{ - if (fio_is_remote_fd(fd)) - { - fio_header hdr = { - .cop = FIO_READ, - .handle = fd & ~FIO_PIPE_MARKER, - .size = 0, - .arg = size, - }; - - 
IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - Assert(hdr.cop == FIO_SEND); - IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - errno = hdr.arg; - - return hdr.size; - } - else - { - return read(fd, buf, size); - } -} - /* * Read value of a symbolic link * this is a wrapper about readlink() syscall @@ -1088,188 +951,6 @@ fio_mkdir_impl(const char* path, int mode, bool strict, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } -#ifdef HAVE_LIBZ - -#define ZLIB_BUFFER_SIZE (64*1024) -#define MAX_WBITS 15 /* 32K LZ77 window */ -#define DEF_MEM_LEVEL 8 -/* last bit used to differenciate remote gzFile from local gzFile - * TODO: this is insane, we should create our own scructure for this, - * not flip some bits in someone's else and hope that it will not break - * between zlib versions. - */ -#define FIO_GZ_REMOTE_MARKER 1 - -typedef struct fioGZFile -{ - z_stream strm; - int fd; - int errnum; - bool eof; - Bytef buf[ZLIB_BUFFER_SIZE]; -} fioGZFile; - -/* On error returns NULL and errno should be checked */ -gzFile -fio_gzopen(fio_location location, const char* path, const char* mode, int level) -{ - int rc; - - if (strchr(mode, 'w') != NULL) /* compress */ - { - Assert(false); - elog(ERROR, "fio_gzopen(\"wb\") is not implemented"); - } - - if (fio_is_remote(location)) - { - fioGZFile* gz = (fioGZFile*) pgut_malloc(sizeof(fioGZFile)); - memset(&gz->strm, 0, sizeof(gz->strm)); - gz->eof = 0; - gz->errnum = Z_OK; - gz->strm.next_in = gz->buf; - gz->strm.avail_in = ZLIB_BUFFER_SIZE; - rc = inflateInit2(&gz->strm, 15 + 16); - gz->strm.avail_in = 0; - if (rc == Z_OK) - { - gz->fd = fio_open(location, path, O_RDONLY | PG_BINARY); - if (gz->fd < 0) - { - free(gz); - return NULL; - } - } - if (rc != Z_OK) - { - elog(ERROR, "zlib internal error when opening file %s: %s", - path, gz->strm.msg); - } - return (gzFile)((size_t)gz + FIO_GZ_REMOTE_MARKER); - 
} - else - { - gzFile file; - file = gzopen(path, mode); - if (file != NULL && level != Z_DEFAULT_COMPRESSION) - { - if (gzsetparams(file, level, Z_DEFAULT_STRATEGY) != Z_OK) - elog(ERROR, "Cannot set compression level %d: %s", - level, strerror(errno)); - } - return file; - } -} - -int -fio_gzread(gzFile f, void *buf, unsigned size) -{ - if ((size_t)f & FIO_GZ_REMOTE_MARKER) - { - int rc; - fioGZFile* gz = (fioGZFile*)((size_t)f - FIO_GZ_REMOTE_MARKER); - - if (gz->eof) - { - return 0; - } - - gz->strm.next_out = (Bytef *)buf; - gz->strm.avail_out = size; - - while (1) - { - if (gz->strm.avail_in != 0) /* If there is some data in receiver buffer, then decompress it */ - { - rc = inflate(&gz->strm, Z_NO_FLUSH); - if (rc == Z_STREAM_END) - { - gz->eof = 1; - } - else if (rc != Z_OK) - { - gz->errnum = rc; - return -1; - } - if (gz->strm.avail_out != size) - { - return size - gz->strm.avail_out; - } - if (gz->strm.avail_in == 0) - { - gz->strm.next_in = gz->buf; - } - } - else - { - gz->strm.next_in = gz->buf; - } - rc = fio_read(gz->fd, gz->strm.next_in + gz->strm.avail_in, - gz->buf + ZLIB_BUFFER_SIZE - gz->strm.next_in - gz->strm.avail_in); - if (rc > 0) - { - gz->strm.avail_in += rc; - } - else - { - if (rc == 0) - { - gz->eof = 1; - } - return rc; - } - } - } - else - { - return gzread(f, buf, size); - } -} - -int -fio_gzclose(gzFile f) -{ - if ((size_t)f & FIO_GZ_REMOTE_MARKER) - { - fioGZFile* gz = (fioGZFile*)((size_t)f - FIO_GZ_REMOTE_MARKER); - int rc; - inflateEnd(&gz->strm); - rc = fio_close(gz->fd); - free(gz); - return rc; - } - else - { - return gzclose(f); - } -} - -const char* -fio_gzerror(gzFile f, int *errnum) -{ - if ((size_t)f & FIO_GZ_REMOTE_MARKER) - { - fioGZFile* gz = (fioGZFile*)((size_t)f - FIO_GZ_REMOTE_MARKER); - if (errnum) - *errnum = gz->errnum; - return gz->strm.msg; - } - else - { - return gzerror(f, errnum); - } -} - -z_off_t -fio_gzseek(gzFile f, z_off_t offset, int whence) -{ - Assert(!((size_t)f & FIO_GZ_REMOTE_MARKER)); - return 
gzseek(f, offset, whence); -} - - -#endif - static void fio_send_pio_err(int out, err_i err) { @@ -5157,6 +4838,9 @@ pioWriteFilter_fobjRepr(VSelf) } #ifdef HAVE_LIBZ +#define MAX_WBITS 15 /* 32K LZ77 window */ +#define DEF_MEM_LEVEL 8 + static err_i newGZError(const char *gzmsg, int gzerrno) { diff --git a/src/utils/file.h b/src/utils/file.h index 68a990446..4975ca3e7 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -125,7 +125,6 @@ extern void fio_communicate(int in, int out); extern void fio_disconnect(void); #define FIO_FDMAX 64 -#define FIO_PIPE_MARKER 0x40000000 /* Check if FILE handle is local or remote (created by FIO) */ #define fio_is_remote_file(file) ((size_t)(file) <= FIO_FDMAX) @@ -138,21 +137,6 @@ extern void fio_error(int rc, int size, const char* file, int line); extern void fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size); -/* fd-style functions */ -extern int fio_open(fio_location location, const char* name, int mode); -extern ssize_t fio_read(int fd, void* buf, size_t size); -extern int fio_seek(int fd, off_t offs); -extern int fio_close(int fd); - -/* gzFile-style functions */ -#ifdef HAVE_LIBZ -extern gzFile fio_gzopen(fio_location location, const char* path, const char* mode, int level); -extern int fio_gzclose(gzFile file); -extern int fio_gzread(gzFile f, void *buf, unsigned size); -extern z_off_t fio_gzseek(gzFile f, z_off_t offset, int whence); -extern const char* fio_gzerror(gzFile file, int *errnum); -#endif - /* DIR-style functions */ extern DIR* fio_opendir(fio_location location, const char* path); extern struct dirent * fio_readdir(DIR *dirp); From 401bccf0b7077b37a5898416d7a9aa087f50fb3d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 18:57:40 +0300 Subject: [PATCH 195/339] fu_util/fobj: fix autorelease pool cleanup in glibc glibc's pthread_exit runs stack unwinding for C++ destructor's handling, and calls destructors for pthread specifics only after unwinding. 
This way fobj_destroy_thread_AR is called too late. We have to compile with '-fexception' to register all our attribute((cleanup)) for autorelease pools as "C++ destructors". Found in debug session with Sergey Fucanchik --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index e34d49934..30510d15b 100644 --- a/Makefile +++ b/Makefile @@ -81,6 +81,7 @@ PG_CPPFLAGS += -Isrc endif override CPPFLAGS := -DFRONTEND $(CPPFLAGS) $(PG_CPPFLAGS) PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} +CFLAGS += -fexception # additional dependencies on borrowed files src/backup.o src/catchup.o src/pg_probackup.o: src/compatibility/streamutil.h From dcf90d689c346ad03fbfc000ea4071014c69f959 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 18:59:16 +0300 Subject: [PATCH 196/339] merge.c: unused buffer --- src/merge.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/merge.c b/src/merge.c index eeda3e528..bbd6b53e6 100644 --- a/src/merge.c +++ b/src/merge.c @@ -1178,7 +1178,6 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, const char *full_database_dir, bool use_bitmap, bool is_retry, bool no_sync) { - char *buffer = pgut_malloc(STDIO_BUFSIZE); char to_fullpath[MAXPGPATH]; char to_fullpath_tmp1[MAXPGPATH]; /* used for restore */ char to_fullpath_tmp2[MAXPGPATH]; /* used for backup */ @@ -1211,8 +1210,6 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Closing target file"); - pg_free(buffer); - /* tmp_file->size is greedy, even if there is single 8KB block in file, * that was overwritten twice during restore_data_file, we would assume that its size is
From b84bed3ecb3b29fe04ea6ca002cae3dabcbffa89 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 15 Dec 2022 19:01:03 +0300 Subject: [PATCH 197/339] fix Makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 30510d15b..1ea388927 100644 --- a/Makefile +++ b/Makefile @@ -81,7 +81,7 @@ PG_CPPFLAGS += -Isrc endif override CPPFLAGS := -DFRONTEND $(CPPFLAGS) $(PG_CPPFLAGS) PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} -CFLAGS += -fexception +CFLAGS += -fexceptions # additional dependencies on borrowed files src/backup.o src/catchup.o src/pg_probackup.o: src/compatibility/streamutil.h From 4b4638201417755419dcd0f58ab6fdf657430331 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Thu, 15 Dec 2022 19:31:12 +0300 Subject: [PATCH 198/339] fix error message in merge_test --- tests/merge_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/merge_test.py b/tests/merge_test.py index 66ccad78e..6fda0a31b 100644 --- a/tests/merge_test.py +++ b/tests/merge_test.py @@ -2579,7 +2579,7 @@ def test_missing_data_file(self): logfile_content = f.read() self.assertIn( - 'ERROR: Cannot open backup file "{0}": No such file or directory'.format(file_to_remove), + 'ERROR: Open backup file: Cannot open file "{0}": No such file or directory'.format(file_to_remove), logfile_content) # @unittest.skip("skip") From f8fbbbbed3c3f96ad129e0d8cab9804314ca2dbc Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 16 Dec 2022 01:57:41 +0300 Subject: [PATCH 199/339] ... 
--- src/fu_util/impl/fo_impl.h | 2 +- src/fu_util/test/fm.c | 316 +++++++++++++++++++++++++++++++++++++ 2 files changed, 317 insertions(+), 1 deletion(-) create mode 100644 src/fu_util/test/fm.c diff --git a/src/fu_util/impl/fo_impl.h b/src/fu_util/impl/fo_impl.h index 3cb1b2edf..f75470328 100644 --- a/src/fu_util/impl/fo_impl.h +++ b/src/fu_util/impl/fo_impl.h @@ -437,7 +437,7 @@ typedef struct { fobj__nm_invoke(meth)(self, fobj_self_klass, fobj_pass_params(meth, __VA_ARGS__)) #define fobj_call_super(meth, _klassh, self, ...) \ - fobj__nm_invoke(meth)(self, _klassh, true), fobj_pass_params(meth, __VA_ARGS__)) + fobj__nm_invoke(meth)(self, _klassh, fobj_pass_params(meth, __VA_ARGS__)) #define fobj_iface_call(meth, iface, ...) \ fobj_call(meth, (fobj_t)(iface).fobj__nm_has(meth), __VA_ARGS__) diff --git a/src/fu_util/test/fm.c b/src/fu_util/test/fm.c new file mode 100644 index 000000000..f42693802 --- /dev/null +++ b/src/fu_util/test/fm.c @@ -0,0 +1,316 @@ +#include + +#ifndef __TINYC__ + #define AssertEq(x, v, name) _Static_assert(x == v, name) + #define AssertEqStr(x, str, name) _Static_assert(__builtin_strcmp(x, str) == 0, name) + #define AssertNqStr(x, str, name) _Static_assert(__builtin_strcmp(x, str) != 0, name) +#else + #ifdef NDEBUG + #undef NDEBUG + #endif + #include + #include + #define AssertEq(x, v, name) assert(x == v) + #define AssertEqStr(x, str, name) assert(strcmp(x, str) == 0) + #define AssertNqStr(x, str, name) assert(strcmp(x, str) != 0) +#endif + +int main(void) { + +#define asdfhjkl 99 +AssertEq(fm_cat(asdf,hjkl), 99, "fm_cat"); +AssertEq(fm_cat(fm_cat(as,df),fm_cat(hj,kl)), 99, "fm_cat(fm_cat)"); +AssertEq(fm_cat3(as,dfhj,kl), 99, "fm_cat3"); +AssertEq(fm_cat4(as,df,hj,kl), 99, "fm_cat4"); + +AssertEqStr(fm_str(1), "1", "fm_str"); +AssertEqStr(fm_str(1,2), "1,2", "fm_str"); +AssertEqStr(fm_str(1, 2), "1, 2", "fm_str"); +AssertEqStr(fm_str(1, 2), "1, 2", "fm_str"); +AssertEqStr(fm_str(1 , 2), "1 , 2", "fm_str"); +AssertEqStr(fm_str(1, 2 ), "1, 
2", "fm_str"); +AssertEqStr(fm_str(fm_cat(1,2)), "12", "fm_str"); +AssertEqStr(fm_str(fm_cat(1, 2 )), "12", "fm_str"); +AssertEqStr(fm_str(fm_cat(x, y )), "xy", "fm_str"); +AssertEqStr(fm_str(fm_cat(x , y )), "xy", "fm_str"); +AssertEqStr(fm_str(fm_cat3(1,2,3)), "123", "fm_str"); +AssertEqStr(fm_str(fm_cat3(1, 2 , 3)), "123", "fm_str"); +AssertEqStr(fm_str(fm_cat3(x, y , z)), "xyz", "fm_str"); +AssertEqStr(fm_str(fm_cat3(x , y ,z)), "xyz", "fm_str"); + +AssertNqStr(fm_str(fm_uniq(x)), fm_str(fm_uniq(x)), "fm_uniq"); + +AssertEqStr(fm_str(fm_expand()), "", "fm_expand"); +AssertEqStr(fm_str(fm_expand(1)), "1", "fm_expand"); +AssertEqStr(fm_str(fm_expand( 1 )), "1", "fm_expand"); +AssertEqStr(fm_str( fm_expand( 1 ) ), "1", "fm_expand"); +AssertEqStr(fm_str(fm_expand(1,2)), "1,2", "fm_expand"); +AssertEqStr(fm_str(fm_expand( 1 , 2 )), "1 , 2", "fm_expand"); +AssertEqStr(fm_str( fm_expand( 1 , 2) ), "1 , 2", "fm_expand"); + +AssertEqStr(fm_str(fm_empty()), "", "fm_empty"); +AssertEqStr(fm_str(fm_empty(1)), "", "fm_empty"); +AssertEqStr(fm_str(fm_empty(1 , 3)), "", "fm_empty"); + +AssertEqStr(fm_str(fm_comma), "fm_comma", "fm_comma"); +AssertEqStr(fm_str(fm_comma()), ",", "fm_comma"); +AssertEqStr(fm_str(fm_comma(xx,xx)), ",", "fm_comma"); +AssertEqStr(fm_str(fm__comma), ",", "fm_comma"); + +AssertEqStr(fm_str(fm_apply(fm_expand)), "", "fm_apply"); +AssertEqStr(fm_str(fm_apply(fm_expand, 1)), "1", "fm_apply"); +AssertEqStr(fm_str(fm_apply(fm_expand, 1, 2)), "1, 2", "fm_apply"); +AssertEqStr(fm_str(fm_apply(fm_expand, 1,2)), "1,2", "fm_apply"); +AssertEqStr(fm_str(fm_apply(fm_expand, 1 ,2)), "1 ,2", "fm_apply"); +AssertEqStr(fm_str(fm_apply(fm_cat, 1 ,2)), "12", "fm_apply"); +AssertEqStr(fm_str(fm_apply(fm_cat, 1, 2)), "12", "fm_apply"); +AssertEqStr(fm_str(fm_apply(fm_cat3, x, y, z)), "xyz", "fm_apply"); +AssertEqStr(fm_str(fm_apply(fm_comma, ())), ",", "fm_apply"); + +AssertEq(fm_compl(1), 0, "fm_compl"); +AssertEq(fm_compl(0), 1, "fm_compl"); 
+AssertEq(fm_compl(fm_true), 0, "fm_compl"); +AssertEq(fm_compl(fm_false), 1, "fm_compl"); + +AssertEq(fm_and(0, 0), 0, "fm_and"); +AssertEq(fm_and(0, 1), 0, "fm_and"); +AssertEq(fm_and(1, 0), 0, "fm_and"); +AssertEq(fm_and(1, 1), 1, "fm_and"); +AssertEq(fm_and(fm_false, fm_false), fm_false, "fm_and"); +AssertEq(fm_and(fm_false, fm_true), fm_false, "fm_and"); +AssertEq(fm_and(fm_true, fm_false), fm_false, "fm_and"); +AssertEq(fm_and(fm_true, fm_true), fm_true, "fm_and"); + +AssertEq(fm_or(0, 0), 0, "fm_or"); +AssertEq(fm_or(0, 1), 1, "fm_or"); +AssertEq(fm_or(1, 0), 1, "fm_or"); +AssertEq(fm_or(1, 1), 1, "fm_or"); +AssertEq(fm_or(fm_false, fm_false), fm_false, "fm_or"); +AssertEq(fm_or(fm_false, fm_true), fm_true, "fm_or"); +AssertEq(fm_or(fm_true, fm_false), fm_true, "fm_or"); +AssertEq(fm_or(fm_true, fm_true), fm_true, "fm_or"); + +AssertEq(fm_nand(0, 0), 1, "fm_nand"); +AssertEq(fm_nand(0, 1), 1, "fm_nand"); +AssertEq(fm_nand(1, 0), 1, "fm_nand"); +AssertEq(fm_nand(1, 1), 0, "fm_nand"); +AssertEq(fm_nand(fm_false, fm_false), fm_true, "fm_nand"); +AssertEq(fm_nand(fm_false, fm_true), fm_true, "fm_nand"); +AssertEq(fm_nand(fm_true, fm_false), fm_true, "fm_nand"); +AssertEq(fm_nand(fm_true, fm_true), fm_false, "fm_nand"); + +AssertEq(fm_nor(0, 0), 1, "fm_nor"); +AssertEq(fm_nor(0, 1), 0, "fm_nor"); +AssertEq(fm_nor(1, 0), 0, "fm_nor"); +AssertEq(fm_nor(1, 1), 0, "fm_nor"); +AssertEq(fm_nor(fm_false, fm_false), fm_true, "fm_nor"); +AssertEq(fm_nor(fm_false, fm_true), fm_false, "fm_nor"); +AssertEq(fm_nor(fm_true, fm_false), fm_false, "fm_nor"); +AssertEq(fm_nor(fm_true, fm_true), fm_false, "fm_nor"); + +AssertEq(fm_xor(0, 0), 0, "fm_xor"); +AssertEq(fm_xor(0, 1), 1, "fm_xor"); +AssertEq(fm_xor(1, 0), 1, "fm_xor"); +AssertEq(fm_xor(1, 1), 0, "fm_xor"); +AssertEq(fm_xor(fm_false, fm_false), fm_false, "fm_xor"); +AssertEq(fm_xor(fm_false, fm_true), fm_true, "fm_xor"); +AssertEq(fm_xor(fm_true, fm_false), fm_true, "fm_xor"); +AssertEq(fm_xor(fm_true, fm_true), fm_false, 
"fm_xor"); + +AssertEq(fm_if(fm_true, 3, 4), 3, "fm_if"); +AssertEq(fm_if(fm_false, 3, 4), 4, "fm_if"); +AssertEqStr(fm_str(fm_if(fm_false, 3, 4)), "4", "fm_if"); +AssertEqStr(fm_str(fm_if(fm_false, 3, 4, 5)), "4, 5", "fm_if"); + +AssertEqStr(fm_str(fm_when(fm_true)(3, 4)), "3, 4", "fm_when"); +AssertEqStr(fm_str(fm_when(fm_false)(3, 4)), "", "fm_when"); + +AssertEqStr(fm_str(fm_iif(fm_true)(3, 4)(5, 6)), "3, 4", "fm_iif"); +AssertEqStr(fm_str(fm_iif(fm_false)(3, 4)(5, 6)), "5, 6", "fm_iif"); + +#define COMPARE_FOO(x) x +#define COMPARE_BAR(x) x +AssertEq(fm_equal(FOO, FOO), 1, "fm_equal"); +AssertEq(fm_equal(BAR, BAR), 1, "fm_equal"); +AssertEq(fm_equal(FOO, BAR), 0, "fm_equal"); +AssertEq(fm_equal(BAR, FOO), 0, "fm_equal"); +AssertEq(fm_equal(BAR, BAZ), 0, "fm_equal"); +AssertEq(fm_equal(BAZ, BAR), 0, "fm_equal"); +AssertEq(fm_equal(BAZ, BAD), 0, "fm_equal"); + +AssertEq(fm_not_equal(FOO, FOO), 0, "fm_not_equal"); +AssertEq(fm_not_equal(BAR, BAR), 0, "fm_not_equal"); +AssertEq(fm_not_equal(FOO, BAR), 1, "fm_not_equal"); +AssertEq(fm_not_equal(BAR, FOO), 1, "fm_not_equal"); +AssertEq(fm_not_equal(BAR, BAZ), 1, "fm_not_equal"); +AssertEq(fm_not_equal(BAZ, BAR), 1, "fm_not_equal"); +AssertEq(fm_not_equal(BAZ, BAD), 1, "fm_not_equal"); + +AssertEq(fm_head(2), 2, "fm_head"); +AssertEq(fm_head(2, 3), 2, "fm_head"); +AssertEq(fm_head(2, 3, 4), 2, "fm_head"); +AssertEq(fm_head(2, 3, 4), 2, "fm_head"); +AssertEqStr(fm_str(fm_head()), "", "fm_head"); +AssertEqStr(fm_str(fm_head(, 1)), "", "fm_head"); +AssertEqStr(fm_str(fm_head(fm__comma)), "", "fm_head"); +AssertEqStr(fm_str(fm_head(fm__comma, 1)), "", "fm_head"); +AssertEqStr(fm_str(fm_head(fm_comma(), 1)), "", "fm_head"); +AssertEqStr(fm_str(fm_tail(2)), "", "fm_head"); +AssertEqStr(fm_str(fm_tail(2, 3)), "3", "fm_head"); +AssertEqStr(fm_str(fm_tail(2, 3, 4)), "3, 4", "fm_head"); + +AssertEq(fm_va_single(), 1, "fm_va_single"); +AssertEq(fm_va_single(1), 1, "fm_va_single"); +AssertEq(fm_va_single(,), 0, "fm_va_single"); 
+AssertEq(fm_va_single(fm_expand()), 1, "fm_va_single"); +AssertEq(fm_va_single(fm_expand(1)), 1, "fm_va_single"); +AssertEq(fm_va_single(fm_expand(,)), 0, "fm_va_single"); + +AssertEq(fm_va_many(), 0, "fm_va_many"); +AssertEq(fm_va_many(1), 0, "fm_va_many"); +AssertEq(fm_va_many(,), 1, "fm_va_many"); +AssertEq(fm_va_many(fm_expand()), 0, "fm_va_many"); +AssertEq(fm_va_many(fm_expand(1)), 0, "fm_va_many"); +AssertEq(fm_va_many(fm_expand(,)), 1, "fm_va_many"); + +AssertEq(fm_no_va(), 1, "fm_no_va"); +AssertEq(fm_no_va(fm_empty()), 1, "fm_no_va"); +AssertEq(fm_no_va(fm_empty(1)), 1, "fm_no_va"); +AssertEq(fm_no_va(1), 0, "fm_no_va"); +AssertEq(fm_no_va(,), 0, "fm_no_va"); +AssertEq(fm_no_va(,1), 0, "fm_no_va"); + +AssertEq(fm_va_01(), 0, "fm_va_01"); +AssertEq(fm_va_01(fm_empty()), 0, "fm_va_01"); +AssertEq(fm_va_01(fm_empty(1)), 0, "fm_va_01"); +AssertEq(fm_va_01(1), 1, "fm_va_01"); +AssertEq(fm_va_01(,), 1, "fm_va_01"); +AssertEq(fm_va_01(,1), 1, "fm_va_01"); + +AssertEq(fm_va_01n(), 0, "fm_va_01n"); +AssertEq(fm_va_01n(fm_empty()), 0, "fm_va_01n"); +AssertEq(fm_va_01n(x), 1, "fm_va_01n"); +AssertEq(fm_va_01n(fm_cat(x, y)), 1, "fm_va_01n"); +AssertEq(fm_va_01n(fm_head(x, y)), 1, "fm_va_01n"); +AssertEq(fm_va_01n(fm_tail(x)), 0, "fm_va_01n"); +AssertEq(fm_va_01n(fm_tail(x, y)), 1, "fm_va_01n"); +AssertEqStr(fm_str(fm_va_01n(,)), "n", "fm_va_01n"); +AssertEqStr(fm_str(fm_va_01n(x,)), "n", "fm_va_01n"); +AssertEqStr(fm_str(fm_va_01n(1,2)), "n", "fm_va_01n"); +AssertEqStr(fm_str(fm_va_01n(fm_tail(1,2,3))), "n", "fm_va_01n"); + +AssertEq(fm_or_default()(5), 5, "fm_or_default"); +AssertEq(fm_or_default(4)(5), 4, "fm_or_default"); +AssertEqStr(fm_str(fm_or_default()(5, 6)), "5, 6", "fm_or_default"); +AssertEqStr(fm_str(fm_or_default(3, 4)(5, 6)), "3, 4", "fm_or_default"); + +AssertEqStr(fm_str(fm_when_isnt_empty()(5)), "", "fm_when_isnt_empty"); +AssertEqStr(fm_str(fm_when_isnt_empty(fm_empty())(5)), "", "fm_when_isnt_empty"); 
+AssertEqStr(fm_str(fm_when_isnt_empty(1)(5)), "5", "fm_when_isnt_empty"); +AssertEqStr(fm_str(fm_when_isnt_empty(1)(5, 6)), "5, 6", "fm_when_isnt_empty"); + +AssertEq(fm_is_tuple(), 0, "fm_is_tuple"); +AssertEq(fm_is_tuple(1), 0, "fm_is_tuple"); +AssertEq(fm_is_tuple(1, 2), 0, "fm_is_tuple"); +AssertEq(fm_is_tuple(,), 0, "fm_is_tuple"); +AssertEq(fm_is_tuple(()), 1, "fm_is_tuple"); +AssertEq(fm_is_tuple((,)), 1, "fm_is_tuple"); +AssertEq(fm_is_tuple((fm_comma)), 1, "fm_is_tuple"); + +#define add_x(y) y##x +#define add_ax(a, y) y##a##x +AssertEqStr(fm_str(fm_eval_foreach(add_x)), + "", "fm_eval_foreach"); +AssertEqStr(fm_str(fm_eval_foreach(add_x, a)), + "ax", "fm_eval_foreach"); +AssertEqStr(fm_str(fm_eval_foreach(add_x, a, b)), + "ax bx", "fm_eval_foreach"); +AssertEqStr(fm_str(fm_eval_foreach(add_x, a, b, c)), + "ax bx cx", "fm_eval_foreach"); +AssertEqStr(fm_str(fm_eval_foreach(add_x, a, b, c, d)), + "ax bx cx dx", "fm_eval_foreach"); +AssertEqStr(fm_str(fm_eval_foreach(add_x, a, b, c, d, e)), + "ax bx cx dx ex", "fm_eval_foreach"); + +AssertEqStr(fm_str(fm_eval_foreach_comma(add_x)), + "", "fm_eval_foreach_comma"); +AssertEqStr(fm_str(fm_eval_foreach_comma(add_x, a)), + "ax", "fm_eval_foreach_comma"); +AssertEqStr(fm_str(fm_eval_foreach_comma(add_x, a, b)), + "ax , bx", "fm_eval_foreach_comma"); +AssertEqStr(fm_str(fm_eval_foreach_comma(add_x, a, b, c)), + "ax , bx , cx", "fm_eval_foreach_comma"); +AssertEqStr(fm_str(fm_eval_foreach_comma(add_x, a, b, c, d)), + "ax , bx , cx , dx", "fm_eval_foreach_comma"); +AssertEqStr(fm_str(fm_eval_foreach_comma(add_x, a, b, c, d, e)), + "ax , bx , cx , dx , ex", "fm_eval_foreach_comma"); + +AssertEqStr(fm_str(fm_eval_foreach_arg(add_ax, Z)), + "", "fm_eval_foreach_arg"); +AssertEqStr(fm_str(fm_eval_foreach_arg(add_ax, Z, a)), + "aZx", "fm_eval_foreach_arg"); +AssertEqStr(fm_str(fm_eval_foreach_arg(add_ax, Z, a, b)), + "aZx bZx", "fm_eval_foreach_arg"); +AssertEqStr(fm_str(fm_eval_foreach_arg(add_ax, Z, a, b, c)), + "aZx 
bZx cZx", "fm_eval_foreach_arg"); +AssertEqStr(fm_str(fm_eval_foreach_arg(add_ax, Z, a, b, c, d)), + "aZx bZx cZx dZx", "fm_eval_foreach_arg"); +AssertEqStr(fm_str(fm_eval_foreach_arg(add_ax, Z, a, b, c, d, e)), + "aZx bZx cZx dZx eZx", "fm_eval_foreach_arg"); + +#define map_tuple(t, ...) map_tuple_##t(__VA_ARGS__) +#define map_tuple_k(x) fm_cat(x, K) +#define map_tuple_n(x, y) fm_cat3(x, y, N) + +#define map_tuple_a(a, t, ...) map_tuple_a##t(a, __VA_ARGS__) +#define map_tuple_ak(a, x) fm_cat3(x, a, K) +#define map_tuple_an(a, x, y) fm_cat4(x, a, y, N) + +AssertEqStr(fm_str(fm_eval_tuples(map_tuple)), + "", "fm_eval_tuples"); +AssertEqStr(fm_str(fm_eval_tuples(map_tuple, (k, a))), + "aK", "fm_eval_tuples"); +AssertEqStr(fm_str(fm_eval_tuples(map_tuple, (n, a, b))), + "abN", "fm_eval_tuples"); +AssertEqStr(fm_str(fm_eval_tuples(map_tuple, (k, a), (n, a, b))), + "aK abN", "fm_eval_tuples"); +AssertEqStr(fm_str(fm_eval_tuples(map_tuple, (k, a), (n, a, b), (k, a))), + "aK abN aK", "fm_eval_tuples"); +AssertEqStr(fm_str(fm_eval_tuples(map_tuple, (k, a), (n, a, b), (k, c))), + "aK abN cK", "fm_eval_tuples"); +AssertEqStr(fm_str(fm_eval_tuples(map_tuple, (k, a), (n, a, b), (k, c), (n, c, d))), + "aK abN cK cdN", "fm_eval_tuples"); +AssertEqStr(fm_str(fm_eval_tuples(map_tuple, (k, a), (n, a, b), (k, c), (n, c, d), (k, e))), + "aK abN cK cdN eK", "fm_eval_tuples"); + +AssertEqStr(fm_str(fm_eval_tuples_comma(map_tuple)), + "", "fm_eval_tuples_comma"); +AssertEqStr(fm_str(fm_eval_tuples_comma(map_tuple, (k, a))), + "aK", "fm_eval_tuples_comma"); +AssertEqStr(fm_str(fm_eval_tuples_comma(map_tuple, (n, a, b))), + "abN", "fm_eval_tuples_comma"); +AssertEqStr(fm_str(fm_eval_tuples_comma(map_tuple, (k, a), (n, a, b))), + "aK , abN", "fm_eval_tuples_comma"); +AssertEqStr(fm_str(fm_eval_tuples_comma(map_tuple, (k, a), (n, a, b), (k, c))), + "aK , abN , cK", "fm_eval_tuples_comma"); +AssertEqStr(fm_str(fm_eval_tuples_comma(map_tuple, (k, a), (n, a, b), (k, c), (n, c, d))), + "aK , 
abN , cK , cdN", "fm_eval_tuples_comma"); +AssertEqStr(fm_str(fm_eval_tuples_comma(map_tuple, (k, a), (n, a, b), (k, c), (n, c, d), (k, e))), + "aK , abN , cK , cdN , eK", "fm_eval_tuples_comma"); + +AssertEqStr(fm_str(fm_eval_tuples_arg(map_tuple_a, Y)), + "", "fm_eval_tuples_arg"); +AssertEqStr(fm_str(fm_eval_tuples_arg(map_tuple_a, Y, (k, a))), + "aYK", "fm_eval_tuples_arg"); +AssertEqStr(fm_str(fm_eval_tuples_arg(map_tuple_a, Y, (n, a, b))), + "aYbN", "fm_eval_tuples_arg"); +AssertEqStr(fm_str(fm_eval_tuples_arg(map_tuple_a, Y, (k, a), (n, a, b))), + "aYK aYbN", "fm_eval_tuples_arg"); +AssertEqStr(fm_str(fm_eval_tuples_arg(map_tuple_a, Y, (k, a), (n, a, b), (k, c))), + "aYK aYbN cYK", "fm_eval_tuples_arg"); +AssertEqStr(fm_str(fm_eval_tuples_arg(map_tuple_a, Y, (k, a), (n, a, b), (k, c), (n, c, d))), + "aYK aYbN cYK cYdN", "fm_eval_tuples_arg"); +AssertEqStr(fm_str(fm_eval_tuples_arg(map_tuple_a, Y, (k, a), (n, a, b), (k, c), (n, c, d), (k, e))), + "aYK aYbN cYK cYdN eYK", "fm_eval_tuples_arg"); + +} \ No newline at end of file From 68113e38d832ec646adf0070b5cba097bcbd2f7f Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 16 Dec 2022 02:20:49 +0300 Subject: [PATCH 200/339] fu_utils: add test for correct thread termination --- src/fu_util/test/CMakeLists.txt | 4 ++ src/fu_util/test/thread.c | 69 +++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 src/fu_util/test/thread.c diff --git a/src/fu_util/test/CMakeLists.txt b/src/fu_util/test/CMakeLists.txt index 05eea86c6..3752ecf18 100644 --- a/src/fu_util/test/CMakeLists.txt +++ b/src/fu_util/test/CMakeLists.txt @@ -23,6 +23,9 @@ target_link_libraries(sort_p fu_utils) add_executable(obj1 obj1.c) target_link_libraries(obj1 fu_utils) +add_executable(thread thread.c) +target_link_libraries(thread fu_utils) + enable_testing() add_test(NAME fm COMMAND fm) @@ -33,3 +36,4 @@ add_test(NAME fuprintf COMMAND fuprintf) add_test(NAME sort COMMAND sort) add_test(NAME sort_p COMMAND sort_p) 
add_test(NAME obj1 COMMAND obj1) +add_test(NAME thread COMMAND thread) diff --git a/src/fu_util/test/thread.c b/src/fu_util/test/thread.c new file mode 100644 index 000000000..1058fbc52 --- /dev/null +++ b/src/fu_util/test/thread.c @@ -0,0 +1,69 @@ +/* vim: set expandtab autoindent cindent ts=4 sw=4 sts=4 */ +#include +#include + +#include +#include + +#include + + +typedef struct FlagShip { + bool *flag; +} FlagShip; +#define kls__FlagShip mth(fobjDispose) +fobj_klass(FlagShip); + +static bool theFlag1 = false; +static bool theFlag2 = false; +static bool theFlag3 = false; + +static void +FlagShip_fobjDispose(VSelf) +{ + Self(FlagShip); + *self->flag = true; +} + +fobj_klass_handle(FlagShip); + +static +int thr_func3(FlagShip *f) +{ + FOBJ_FUNC_ARP(); + $unref($ref(f)); + $alloc(FlagShip, .flag = &theFlag3); + pthread_exit(NULL); + return 1; +} + +static +int thr_func2(FlagShip *f) +{ + FOBJ_FUNC_ARP(); + $unref($ref(f)); + return theFlag2 + thr_func3($alloc(FlagShip, .flag = &theFlag2)); +} + +static +void* thr_func1(void *arg) +{ + FOBJ_FUNC_ARP(); + printf("%d\n", theFlag1 + thr_func2($alloc(FlagShip, .flag = &theFlag1))); + return NULL; +} + +int +main(int argc, char** argv) +{ + pthread_t th; + void* res; + fobj_init(); + if (pthread_create(&th, NULL, thr_func1, NULL)) + ft_log(FT_FATAL, "Can't create\n"); + if (pthread_join(th, &res)) + ft_log(FT_FATAL, "Can't join\n"); + ft_assert(theFlag1); + ft_assert(theFlag2); + ft_assert(theFlag3); +} \ No newline at end of file From f0215bbcc4882b0381980bb0adbb46ff663710af Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Fri, 16 Dec 2022 11:55:17 +0300 Subject: [PATCH 201/339] PBCKP-415 use pio for catalog_get_backup_list --- src/catalog.c | 80 +++++++++++++++------------------------------------ 1 file changed, 23 insertions(+), 57 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index aa5351e47..accef251f 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -802,27 +802,6 @@ pgBackupGetBackupMode(pgBackup 
*backup, bool show_color) return backupModes[backup->backup_mode]; } -static bool -IsDir(const char *dirpath, const char *entry, fio_location location) -{ - FOBJ_FUNC_ARP(); - char path[MAXPGPATH]; - pio_stat_t st; - err_i err; - - join_path_components(path, dirpath, entry); - - st = $i(pioStat, pioDriveForLocation(location), - .path = path, .follow_symlink = false, .err = &err); - if ($haserr(err)) - { - ft_logerr(FT_WARNING, $errmsg(err), "IsDir"); - return false; - } - - return st.pst_kind == PIO_KIND_DIRECTORY; -} - /* * Create list of instances in given backup catalog. * @@ -893,35 +872,35 @@ catalog_get_instance_list(CatalogState *catalogState) parray * catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id) { - DIR *data_dir = NULL; - struct dirent *data_ent = NULL; parray *backups = NULL; + parray *files = NULL; int i; - /* open backup instance backups directory */ - data_dir = fio_opendir(FIO_BACKUP_HOST, instanceState->instance_backup_subdir_path); - if (data_dir == NULL) - { - elog(WARNING, "cannot open directory \"%s\": %s", instanceState->instance_backup_subdir_path, - strerror(errno)); - goto err_proc; - } + files = parray_new(); + backup_list_dir(files, instanceState->instance_backup_subdir_path); /* scan the directory and list backups */ backups = parray_new(); - for (; (data_ent = fio_readdir(data_dir)) != NULL; errno = 0) + for(i = 0; i < parray_num(files); ++i) { char backup_conf_path[MAXPGPATH]; char data_path[MAXPGPATH]; + char backup_dir_name[MAXPGPATH]; + pgFile *file = (pgFile *) parray_get(files, i); pgBackup *backup = NULL; + char *slash; + + if (strcmp(file->name, BACKUP_CONTROL_FILE) != 0) + continue; - /* skip not-directory entries and hidden entries */ - if (!IsDir(instanceState->instance_backup_subdir_path, data_ent->d_name, FIO_BACKUP_HOST) - || data_ent->d_name[0] == '.') + slash = strchr(file->rel_path, '/'); + if(!slash) continue; + memcpy(backup_dir_name, file->rel_path, slash - file->rel_path); + 
backup_dir_name[slash - file->rel_path] = 0; /* open subdirectory of specific backup */ - join_path_components(data_path, instanceState->instance_backup_subdir_path, data_ent->d_name); + join_path_components(data_path, instanceState->instance_backup_subdir_path, backup_dir_name); /* read backup information from BACKUP_CONTROL_FILE */ join_path_components(backup_conf_path, data_path, BACKUP_CONTROL_FILE); @@ -931,12 +910,12 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id { backup = pgut_new0(pgBackup); pgBackupInit(backup, instanceState->backup_location); - backup->start_time = base36dec(data_ent->d_name); + backup->start_time = base36dec(backup_dir_name); /* XXX BACKUP_ID change it when backup_id wouldn't match start_time */ Assert(backup->backup_id == 0 || backup->backup_id == backup->start_time); backup->backup_id = backup->start_time; } - else if (strcmp(backup_id_of(backup), data_ent->d_name) != 0) + else if (strcmp(backup_id_of(backup), backup_dir_name) != 0) { /* TODO there is no such guarantees */ elog(WARNING, "backup ID in control file \"%s\" doesn't match name of the backup folder \"%s\"", @@ -944,7 +923,6 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id } backup->root_dir = pgut_strdup(data_path); - backup->database_dir = pgut_malloc(MAXPGPATH); join_path_components(backup->database_dir, backup->root_dir, DATABASE_DIR); @@ -961,16 +939,15 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id parray_append(backups, backup); } - if (errno) + parray_walk(files, pgFileFree); + parray_free(files); + + if (parray_num(backups) == 0) { - elog(WARNING, "Cannot read backup root directory \"%s\": %s", - instanceState->instance_backup_subdir_path, strerror(errno)); - goto err_proc; + elog(WARNING, "Cannot find any backups in \"%s\"", + instanceState->instance_backup_subdir_path); } - fio_closedir(data_dir); - data_dir = NULL; - parray_qsort(backups, 
pgBackupCompareIdDesc); /* Link incremental backups with their ancestors.*/ @@ -991,17 +968,6 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id } return backups; - -err_proc: - if (data_dir) - fio_closedir(data_dir); - if (backups) - parray_walk(backups, pgBackupFree); - parray_free(backups); - - elog(ERROR, "Failed to get backup list"); - - return NULL; } /* From 81e2f7fe7503bced7aa3777192e68372423858f9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 16 Dec 2022 23:22:14 +0300 Subject: [PATCH 202/339] [PBCKP-416] do_catchup calls db_list_dir with backup_logs = false. --- src/catchup.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/catchup.c b/src/catchup.c index affc40578..4af3caed1 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -615,7 +615,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pioDrive_i local_location = pioDriveForLocation(FIO_LOCAL_HOST); PGconn *source_conn = NULL; PGNodeInfo source_node_info; - bool backup_logs = false; parray *source_filelist = NULL; pgFile *source_pg_control_file = NULL; parray *dest_filelist = NULL; @@ -649,9 +648,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (current.backup_mode != BACKUP_MODE_FULL) { dest_filelist = parray_new(); - db_list_dir(dest_filelist, dest_pgdata, true, backup_logs, 0); + db_list_dir(dest_filelist, dest_pgdata, true, false, 0); filter_filelist(dest_filelist, dest_pgdata, exclude_absolute_paths_list, exclude_relative_paths_list, "Destination"); - exclude_files(dest_filelist, backup_logs); + exclude_files(dest_filelist, false); // fill dest_redo.lsn and dest_redo.tli get_redo(FIO_LOCAL_HOST, dest_pgdata, &dest_redo); @@ -721,8 +720,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, source_filelist = parray_new(); /* list files with the logical path. 
omit $PGDATA */ - db_list_dir(source_filelist, source_pgdata, true, backup_logs, 0); - exclude_files(source_filelist, backup_logs); + db_list_dir(source_filelist, source_pgdata, true, false, 0); + exclude_files(source_filelist, false); //REVIEW FIXME. Let's fix that before release. // TODO what if wal is not a dir (symlink to a dir)? From 61c4d9031680d3355251876383b51531272fc568 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 17 Dec 2022 01:20:46 +0300 Subject: [PATCH 203/339] ft_strbuf_cat_zt and ft_bytes_shift_str to easier remote operations --- src/fu_util/ft_util.h | 6 ++++++ src/fu_util/impl/ft_impl.c | 16 ++++++++++++++++ src/fu_util/impl/ft_impl.h | 11 +++++++++++ 3 files changed, 33 insertions(+) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index c1a0865d6..45857337f 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -403,6 +403,9 @@ ft_inline FT_CMP_RES ft_strcmpc(ft_str_t str, const char* oth); ft_inline void ft_str_consume(ft_str_t *str, size_t cut); +/* shift zero-terminated string. Will assert if no zero-byte found and it is not last */ +extern ft_str_t ft_bytes_shift_zt(ft_bytes_t *bytes); + /* * String buffer. * It could be growable or fixed. 
@@ -454,10 +457,13 @@ ft_inline bool ft_strbuf_ensure(ft_strbuf_t *buf, size_t n); /* All functions below returns false if fixed buffer was overflowed */ ft_inline bool ft_strbuf_may (ft_strbuf_t *buf); ft_inline bool ft_strbuf_cat (ft_strbuf_t *buf, ft_str_t s); +/* cat string together with zero-terminated byte */ +ft_inline bool ft_strbuf_cat_zt(ft_strbuf_t *buf, ft_str_t s); ft_inline bool ft_strbuf_catbytes(ft_strbuf_t *buf, ft_bytes_t b); ft_inline bool ft_strbuf_cat1 (ft_strbuf_t *buf, char c); ft_inline bool ft_strbuf_cat2 (ft_strbuf_t *buf, char c1, char c2); ft_inline bool ft_strbuf_catc (ft_strbuf_t *buf, const char *s); +ft_inline bool ft_strbuf_catc_zt(ft_strbuf_t *buf, const char *s); ft_gnu_printf(2, 3) extern bool ft_strbuf_catf (ft_strbuf_t *buf, const char *fmt, ...); ft_gnu_printf(2, 0) diff --git a/src/fu_util/impl/ft_impl.c b/src/fu_util/impl/ft_impl.c index 84b47714e..3239cbe25 100644 --- a/src/fu_util/impl/ft_impl.c +++ b/src/fu_util/impl/ft_impl.c @@ -687,6 +687,22 @@ ft_bytes_shift_line(ft_bytes_t *bytes) return ft_bytes(p, i); } +ft_str_t +ft_bytes_shift_zt(ft_bytes_t *bytes) +{ + size_t i; + char *p = bytes->ptr; + + for (i = 0; i < bytes->len; i++) { + if (p[i] == '\0') { + ft_bytes_consume(bytes, i+1); + return ft_str(p, i); + } + } + + ft_assert(bytes->len == 0, "ft_bytes_shift_str have to be on zero-terminated bytes"); + return ft_str(NULL, 0); +} size_t ft_bytes_find_bytes(ft_bytes_t haystack, ft_bytes_t needle) diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index 987dd8251..786538fed 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -521,6 +521,12 @@ ft_strbuf_cat(ft_strbuf_t *buf, ft_str_t s) { return ft_strbuf_catbytes(buf, ft_bytes(s.ptr, s.len)); } +ft_inline bool +ft_strbuf_cat_zt(ft_strbuf_t *buf, ft_str_t s) { + /* we could actually reuse ft_strbuf_catbytes */ + return ft_strbuf_catbytes(buf, ft_bytes(s.ptr, s.len+1)); +} + ft_inline bool ft_strbuf_catbytes(ft_strbuf_t *buf, 
ft_bytes_t s) { if (!ft_strbuf_may(buf)) @@ -578,6 +584,11 @@ ft_strbuf_catc(ft_strbuf_t *buf, const char *s) { return ft_strbuf_cat(buf, ft_cstr(s)); } +ft_inline bool +ft_strbuf_catc_zt(ft_strbuf_t *buf, const char *s) { + return ft_strbuf_cat_zt(buf, ft_cstr(s)); +} + ft_inline void ft_strbuf_reset_for_reuse(ft_strbuf_t *buf) { buf->len = 0; From eaead56049c196dbaa96e083a7443bae16ac7119 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 17 Dec 2022 01:34:17 +0300 Subject: [PATCH 204/339] use ft_strbuf_catc_zt + ft_bytes_shift_zt --- src/utils/file.c | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index a6dc8a2da..f5ff25f37 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2350,6 +2350,8 @@ fio_communicate(int in, int out) pioDBDrive_i drive; pio_stat_t st; ft_bytes_t bytes; + ft_str_t path; + ft_str_t path2; int rc; int tmp_fd; pg_crc32 crc; @@ -2448,7 +2450,10 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &st, sizeof(st)), sizeof(st)); break; case FIO_FILES_ARE_SAME: - hdr.arg = (int)$i(pioFilesAreSame, drive, buf, buf+strlen(buf)+1); + bytes = ft_bytes(buf, hdr.size); + path = ft_bytes_shift_zt(&bytes); + path2 = ft_bytes_shift_zt(&bytes); + hdr.arg = (int)$i(pioFilesAreSame, drive, path.ptr, path2.ptr); hdr.size = 0; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); break; @@ -2475,8 +2480,8 @@ fio_communicate(int in, int out) break; case FIO_WRITE_FILE_AT_ONCE: bytes = ft_bytes(buf, hdr.size); - ft_bytes_consume(&bytes, strlen(buf)+1); - err = $i(pioWriteFile, drive, .path = buf, + path = ft_bytes_shift_zt(&bytes); + err = $i(pioWriteFile, drive, .path = path.ptr, .content = bytes, .binary = hdr.arg); if ($haserr(err)) { @@ -2810,6 +2815,14 @@ typedef struct pioLocalWriteFile iface(pioWriteCloser, pioDBWriter) fobj_klass(pioLocalWriteFile); +typedef struct pioLocalDir +{ + ft_str_t path; + DIR* dir; +} pioLocalDir; +#define 
kls__pioLocalDir iface__pioDirIter, iface(pioDirIter), mth(fobjDispose) +fobj_klass(pioLocalDir); + typedef struct pioRemoteFile { pioFile p; @@ -3984,8 +3997,7 @@ pioRemoteDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) return $iresult(err); } - ft_strbuf_catc(&buf, path); - ft_strbuf_cat1(&buf, '\x00'); + ft_strbuf_catc_zt(&buf, path); ft_strbuf_catbytes(&buf, content); hdr = (fio_header){ @@ -4306,8 +4318,7 @@ pioRemoteDrive_pioOpenRewrite(VSelf, path_t path, int permissions, ft_strbuf_catbytes(&buf, ft_bytes(&hdr, sizeof(hdr))); ft_strbuf_catbytes(&buf, ft_bytes(&req, sizeof(req))); - ft_strbuf_catc(&buf, path); - ft_strbuf_cat1(&buf, '\0'); + ft_strbuf_catc_zt(&buf, path); ((fio_header*)buf.ptr)->size = buf.len - sizeof(hdr); @@ -4354,8 +4365,7 @@ pioRemoteDrive_pioOpenWrite(VSelf, path_t path, int permissions, ft_strbuf_catbytes(&buf, ft_bytes(&hdr, sizeof(hdr))); ft_strbuf_catbytes(&buf, ft_bytes(&req, sizeof(req))); - ft_strbuf_catc(&buf, path); - ft_strbuf_cat1(&buf, '\0'); + ft_strbuf_catc_zt(&buf, path); ((fio_header*)buf.ptr)->size = buf.len - sizeof(hdr); @@ -5466,8 +5476,7 @@ pioRemoteDrive_pioIteratePages(VSelf, path_t from_fullpath, ft_strbuf_catbytes(&buf, ft_bytes(&hdr, sizeof(hdr))); ft_strbuf_catbytes(&buf, ft_bytes(&req, sizeof(req))); ft_strbuf_catbytes(&buf, ft_bytes(pagemap.bitmap, pagemap.bitmapsize)); - ft_strbuf_catc(&buf, from_fullpath); - ft_strbuf_cat1(&buf, '\0'); + ft_strbuf_catc_zt(&buf, from_fullpath); ((fio_header*)buf.ptr)->size = buf.len - sizeof(fio_header); From 0852bf9abe8bf28c7703e99447188771fe0a9dbf Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 17 Dec 2022 02:09:27 +0300 Subject: [PATCH 205/339] [PBCKP-417] move `sync` paramter from pioClose to pioOpenWrite/Rewrite pioClose was not so comfortable place for. We always know at the file opening will it be sync or not. 
--- src/archive.c | 5 ++-- src/catalog.c | 4 ++-- src/data.c | 13 ++++++----- src/merge.c | 2 +- src/restore.c | 2 +- src/utils/file.c | 59 +++++++++++++++++++++++------------------------- src/utils/file.h | 14 ++++++------ 7 files changed, 49 insertions(+), 50 deletions(-) diff --git a/src/archive.c b/src/archive.c index 6dc78af19..d84271e73 100644 --- a/src/archive.c +++ b/src/archive.c @@ -452,7 +452,8 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, if ($haserr(err)) return $iresult(err); - out = $i(pioOpenRewrite, backup_drive, .path = to_fullpath, .err = &err); + out = $i(pioOpenRewrite, backup_drive, .path = to_fullpath, + .sync = !no_sync, .err = &err); if ($haserr(err)) return $iresult(err); @@ -480,7 +481,7 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, if ($haserr(err)) return $iresult(err); - err = $i(pioClose, out, .sync = !no_sync); + err = $i(pioClose, out); if ($haserr(err)) return $iresult(err); diff --git a/src/catalog.c b/src/catalog.c index accef251f..acff0f30b 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2448,7 +2448,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, join_path_components(control_path, backup->root_dir, DATABASE_FILE_LIST); - out = $i(pioOpenRewrite, backup_drive, control_path, .err = &err); + out = $i(pioOpenRewrite, backup_drive, control_path, .sync = sync, .err = &err); if ($haserr(err)) elog(ERROR, "Cannot open file list \"%s\": %s", control_path, strerror(errno)); @@ -2545,7 +2545,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, if (sync) backup->content_crc = pioCRC32Counter_getCRC32(crc); - err = $i(pioClose, out, .sync=sync); + err = $i(pioClose, out); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Closing " DATABASE_FILE_LIST ".tmp"); diff --git a/src/data.c b/src/data.c index 78edbaa80..3fa646936 100644 --- a/src/data.c +++ b/src/data.c @@ -1247,7 +1247,7 @@ create_empty_file(const char *to_root, 
fio_location to_location, pgFile *file) ft_logerr(FT_ERROR, $errmsg(err), "Creating empty file"); err = $i(pioWriteFinish, fl); - err = fobj_err_combine(err, $i(pioClose, fl, .sync=false)); + err = fobj_err_combine(err, $i(pioClose, fl)); if ($haserr(err)) ft_logerr(FT_ERROR, $errmsg(err), "Closing empty file"); @@ -1724,7 +1724,7 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, if($isNULL(out)) { out = $i(pioOpenRewrite, backup_location, to_fullpath, - .use_temp = false, .err = &err); + .use_temp = false, .sync = true, .err = &err); if ($haserr(err)) return $iresult(err); crc32 = pioCRC32Counter_alloc(); @@ -1779,7 +1779,7 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, file->crc = pioCRC32Counter_getCRC32(crc32); ft_dbg_assert(file->write_size == pioCRC32Counter_getSize(crc32)); - err = $i(pioClose, out, true); + err = $i(pioClose, out); if ($haserr(err)) return $iresult(err); } @@ -1846,7 +1846,7 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, if ($haserr(err)) return $iresult(err); - err = $i(pioClose, out, false); + err = $i(pioClose, out); if ($haserr(err)) return $iresult(err); @@ -2004,7 +2004,8 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, hdr_map->fp = $iref( $i(pioOpenRewrite, drive, .path = hdr_map->path, .permissions = FILE_PERMISSION, .binary = true, - .use_temp = is_merge, &err) ); + .use_temp = is_merge, .sync = true, + .err = &err) ); if ($haserr(err)) { ft_logerr(FT_FATAL, $errmsg(err), "opening header map for write"); @@ -2059,7 +2060,7 @@ cleanup_header_map(HeaderMap *hdr_map) /* cleanup descriptor */ if ($notNULL(hdr_map->fp)) { - err = $i(pioClose, hdr_map->fp, .sync = true); + err = $i(pioClose, hdr_map->fp); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "closing header map"); $idel(&hdr_map->fp); diff --git a/src/merge.c b/src/merge.c index bbd6b53e6..5a58e1f42 100644 --- a/src/merge.c +++ b/src/merge.c 
@@ -1206,7 +1206,7 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, /* when retrying merge header map cannot be trusted */ is_retry ? false : true); - err = $i(pioClose, out, .sync = false); + err = $i(pioClose, out); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Closing target file"); diff --git a/src/restore.c b/src/restore.c index ed6e718d2..eb5c4a733 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1273,7 +1273,7 @@ restore_files(void *arg) done: /* close file */ - err = $i(pioClose, out, .sync = false); + err = $i(pioClose, out); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Close restored file"); diff --git a/src/utils/file.c b/src/utils/file.c index f5ff25f37..c4081f8e8 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -99,11 +99,13 @@ struct __attribute__((packed)) fio_req_open_rewrite { uint32_t permissions; bool binary; bool use_temp; + bool sync; }; struct __attribute__((packed)) fio_req_open_write { uint32_t permissions; bool exclusive; + bool sync; }; #if defined(WIN32) @@ -2621,6 +2623,7 @@ fio_communicate(int in, int out) .permissions = req->permissions, .binary = req->binary, .use_temp = req->use_temp, + .sync = req->sync, .err = &err); if ($haserr(err)) fio_send_pio_err(out, err); @@ -2645,6 +2648,7 @@ fio_communicate(int in, int out) fl = $i(pioOpenWrite, drive, .path = path, .permissions = req->permissions, .exclusive = req->exclusive, + .sync = req->sync, .err = &err); if ($haserr(err)) fio_send_pio_err(out, err); @@ -2734,9 +2738,8 @@ fio_communicate(int in, int out) ft_assert(hdr.handle >= 0); ft_assert(objs[hdr.handle] != NULL); - ft_assert(hdr.size == 1); - err = $(pioClose, objs[hdr.handle], .sync = buf[0]); + err = $(pioClose, objs[hdr.handle]); err = fobj_err_combine(err, async_errs[hdr.handle]); if ($haserr(err)) { @@ -2810,6 +2813,7 @@ typedef struct pioLocalWriteFile ft_bytes_t buf; bool use_temp; bool delete_in_dispose; + bool sync; } pioLocalWriteFile; #define kls__pioLocalWriteFile 
iface__pioDBWriter, mth(fobjDispose), \ iface(pioWriteCloser, pioDBWriter) @@ -3009,7 +3013,7 @@ pioLocalDrive_pioOpenReadStream(VSelf, path_t path, err_i *err) static pioWriteCloser_i pioLocalDrive_pioOpenRewrite(VSelf, path_t path, int permissions, - bool binary, bool use_temp, err_i *err) + bool binary, bool use_temp, bool sync, err_i *err) { Self(pioLocalDrive); ft_str_t temppath; @@ -3071,13 +3075,14 @@ pioLocalDrive_pioOpenRewrite(VSelf, path_t path, int permissions, .use_temp = use_temp, .delete_in_dispose = true, .fl = fl, + .sync = sync, .buf = buf); return $bind(pioWriteCloser, res); } static pioDBWriter_i pioLocalDrive_pioOpenWrite(VSelf, path_t path, int permissions, - bool exclusive, err_i *err) + bool exclusive, bool sync, err_i *err) { Self(pioLocalDrive); int fd = -1; @@ -3121,6 +3126,7 @@ pioLocalDrive_pioOpenWrite(VSelf, path_t path, int permissions, .use_temp = false, .delete_in_dispose = exclusive, .fl = fl, + .sync = sync, .buf = buf); return $bind(pioDBWriter, res); } @@ -3397,7 +3403,7 @@ pioLocalFile_fobjDispose(VSelf) } static err_i -pioLocalFile_pioClose(VSelf, bool sync) +pioLocalFile_pioClose(VSelf) { Self(pioLocalFile); err_i err = $noerr(); @@ -3544,7 +3550,7 @@ pioLocalWriteFile_pioTruncate(VSelf, uint64_t sz) } static err_i -pioLocalWriteFile_pioClose(VSelf, bool sync) +pioLocalWriteFile_pioClose(VSelf) { Self(pioLocalWriteFile); int fd; @@ -3563,7 +3569,7 @@ pioLocalWriteFile_pioClose(VSelf, bool sync) return $noerr(); } - if (sync) + if (self->sync) { r = fsync(fd); if (r < 0) @@ -3580,7 +3586,7 @@ pioLocalWriteFile_pioClose(VSelf, bool sync) /* mark as renamed so fobjDispose will not delete it */ self->delete_in_dispose = false; - if (sync) + if (self->sync) { /* * To guarantee renaming the file is persistent, fsync the file with its @@ -4063,7 +4069,7 @@ pioRemoteFile_doClose(VSelf) } static err_i -pioRemoteFile_pioClose(VSelf, bool sync) +pioRemoteFile_pioClose(VSelf) { Self(pioRemoteFile); err_i err = $noerr(); @@ -4296,7 
+4302,7 @@ pioRemoteFile_fobjRepr(VSelf) static pioWriteCloser_i pioRemoteDrive_pioOpenRewrite(VSelf, path_t path, int permissions, - bool binary, bool use_temp, err_i *err) + bool binary, bool use_temp, bool sync, err_i *err) { Self(pioRemoteDrive); ft_strbuf_t buf = ft_strbuf_zero(); @@ -4311,7 +4317,8 @@ pioRemoteDrive_pioOpenRewrite(VSelf, path_t path, int permissions, struct fio_req_open_rewrite req = { .permissions = permissions, .binary = binary, - .use_temp = use_temp + .use_temp = use_temp, + .sync = sync, }; fio_ensure_remote(); @@ -4344,7 +4351,7 @@ pioRemoteDrive_pioOpenRewrite(VSelf, path_t path, int permissions, static pioDBWriter_i pioRemoteDrive_pioOpenWrite(VSelf, path_t path, int permissions, - bool exclusive, err_i *err) + bool exclusive, bool sync, err_i *err) { Self(pioRemoteDrive); ft_strbuf_t buf = ft_strbuf_zero(); @@ -4359,6 +4366,7 @@ pioRemoteDrive_pioOpenWrite(VSelf, path_t path, int permissions, struct fio_req_open_write req = { .permissions = permissions, .exclusive = exclusive, + .sync = sync, }; fio_ensure_remote(); @@ -4514,29 +4522,18 @@ pioRemoteWriteFile_pioTruncate(VSelf, uint64_t sz) } static err_i -pioRemoteWriteFile_pioClose(VSelf, bool sync) +pioRemoteWriteFile_pioClose(VSelf) { Self(pioRemoteWriteFile); - fio_header hdr; err_i err = $noerr(); - struct __attribute__((packed)) { - fio_header hdr; - bool sync; - } req = { - .hdr = { - .cop = PIO_CLOSE, - .handle = self->handle, - .size = 1, - }, - .sync = sync, - }; + fio_header hdr = {.cop = PIO_CLOSE, .handle = self->handle }; ft_assert(self->handle >= 0); if (self->did_async) err = $(pioWriteFinish, self); - IO_CHECK(fio_write_all(fio_stdout, &req, sizeof(req)), sizeof(req)); + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); unset_handle(self->handle); @@ -4670,7 +4667,7 @@ pioReadFilter_pioRead(VSelf, ft_bytes_t wbuf, err_i *err) } static err_i -pioReadFilter_pioClose(VSelf, bool sync) 
+pioReadFilter_pioClose(VSelf) { Self(pioReadFilter); err_i err = $noerr(); @@ -4682,7 +4679,7 @@ pioReadFilter_pioClose(VSelf, bool sync) r = $i(pioFltFinish, self->filter, ft_bytes(NULL, 0), &err); ft_assert(r == 0); } - if ($ifdef(errcl =, pioClose, self->wrapped.self, sync)) + if ($ifdef(errcl =, pioClose, self->wrapped.self)) err = fobj_err_combine(err, errcl); return err; } @@ -4812,7 +4809,7 @@ pioWriteFilter_pioWriteFinish(VSelf) } static err_i -pioWriteFilter_pioClose(VSelf, bool sync) +pioWriteFilter_pioClose(VSelf) { Self(pioWriteFilter); err_i err = $noerr(); @@ -4824,7 +4821,7 @@ pioWriteFilter_pioClose(VSelf, bool sync) r = $i(pioFltFinish, self->filter, ft_bytes(NULL, 0), &err); ft_assert(r == 0); } - if ($ifdef(errcl =, pioClose, self->wrapped.self, sync)) + if ($ifdef(errcl =, pioClose, self->wrapped.self)) err = fobj_err_combine(err, errcl); return err; } @@ -5197,7 +5194,7 @@ pioReSeekableReader_pioSeek(VSelf, uint64_t pos) } static err_i -pioReSeekableReader_pioClose(VSelf, bool sync) +pioReSeekableReader_pioClose(VSelf) { Self(pioReSeekableReader); err_i err; diff --git a/src/utils/file.h b/src/utils/file.h index 4975ca3e7..1cdb323c8 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -199,8 +199,7 @@ fobj_error_cstr_key(gzErrStr); #endif // File -#define mth__pioClose err_i, (bool, sync) -#define mth__pioClose__optional() (sync, false) +#define mth__pioClose err_i #define mth__pioRead size_t, (ft_bytes_t, buf), (err_i *, err) #define mth__pioWrite err_i, (ft_bytes_t, buf) #define mth__pioWriteCompressed err_i, (ft_bytes_t, buf), (CompressAlg, compress_alg) @@ -254,13 +253,14 @@ fobj_iface(pioPagesIterator); #define mth__pioOpenRead pioReader_i, (path_t, path), (err_i *, err) #define mth__pioOpenReadStream pioReadStream_i, (path_t, path), (err_i *, err) #define mth__pioOpenWrite pioDBWriter_i, (path_t, path), (int, permissions), \ - (bool, exclusive), (err_i *, err) -#define mth__pioOpenWrite__optional() (exclusive, false), (permissions, 
FILE_PERMISSION) + (bool, exclusive), (bool, sync), \ + (err_i *, err) +#define mth__pioOpenWrite__optional() (exclusive, false), (sync, false), (permissions, FILE_PERMISSION) #define mth__pioOpenRewrite pioWriteCloser_i, (path_t, path), (int, permissions), \ - (bool, binary), (bool, use_temp), \ - (err_i *, err) + (bool, binary), (bool, use_temp), \ + (bool, sync), (err_i *, err) #define mth__pioOpenRewrite__optional() (binary, true), (use_temp, true), \ - (permissions, FILE_PERMISSION) + (sync, false), (permissions, FILE_PERMISSION) #define mth__pioStat pio_stat_t, (path_t, path), (bool, follow_symlink), \ (err_i *, err) #define mth__pioRemove err_i, (path_t, path), (bool, missing_ok) From 2b274c7a6aeda94929edac3b6a0b981cfe3875ee Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 17 Dec 2022 19:47:45 +0300 Subject: [PATCH 206/339] move pioRename from pioDrive to pioDBDrive it has single invocation in archive.c to move ".ready" to ".done" on database host. --- src/utils/file.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/utils/file.h b/src/utils/file.h index 1cdb323c8..e5259991a 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -310,14 +310,15 @@ fobj_method(pioWriteFile); fobj_method(pioIteratePages); #define iface__pioDrive mth(pioOpenRead, pioOpenReadStream), \ - mth(pioStat, pioRemove, pioRename), \ + mth(pioStat, pioRemove), \ mth(pioExists, pioGetCRC32, pioIsRemote), \ mth(pioMakeDir, pioListDir, pioRemoveDir), \ mth(pioFilesAreSame, pioReadFile, pioWriteFile), \ mth(pioOpenRewrite) fobj_iface(pioDrive); -#define iface__pioDBDrive iface__pioDrive, mth(pioIteratePages, pioOpenWrite) +#define iface__pioDBDrive iface__pioDrive, mth(pioIteratePages, pioOpenWrite), \ + mth(pioRename) fobj_iface(pioDBDrive); extern pioDrive_i pioDriveForLocation(fio_location location); From 47de7fea7575f2a0a1acffc7cf848ff77190b29c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 17 Dec 2022 14:06:41 +0300 Subject: [PATCH 207/339] 
ft_util: easier to parse and compose from C structs FT_BYTES_FOR - bytes buffer for variable ft_bytes_dup - alloc copy of bytes ft_bytes_shift_to/shift_must - cut to bytes from the beginning. --- src/fu_util/ft_util.h | 11 +++++++++++ src/fu_util/impl/ft_impl.h | 18 ++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index 45857337f..d3eacc6a0 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -323,10 +323,18 @@ ft_inline ft_bytes_t ft_bytesc(const char* ptr) { return (ft_bytes_t){.ptr = (char*)ptr, .len = strlen(ptr)}; } +#define FT_BYTES_FOR(var) ft_bytes(&(var), sizeof(var)) + ft_inline ft_bytes_t ft_bytes_alloc(size_t sz) { return ft_bytes(ft_malloc(sz), sz); } +ft_inline ft_bytes_t ft_bytes_dup(ft_bytes_t bytes) { + ft_bytes_t r = ft_bytes_alloc(bytes.len); + memmove(r.ptr, bytes.ptr, bytes.len); + return r; +} + ft_inline void ft_bytes_free(ft_bytes_t* bytes) { ft_free(bytes->ptr); *bytes = ft_bytes(NULL, 0); @@ -337,6 +345,9 @@ ft_inline size_t ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src); ft_inline ft_bytes_t ft_bytes_split(ft_bytes_t *bytes, size_t n); extern ft_bytes_t ft_bytes_shift_line(ft_bytes_t *bytes); +ft_inline bool ft_bytes_shift_to(ft_bytes_t *bytes, ft_bytes_t to); +ft_inline void ft_bytes_shift_must(ft_bytes_t *bytes, ft_bytes_t to); + extern size_t ft_bytes_find_bytes(ft_bytes_t haystack, ft_bytes_t needle); ft_inline size_t ft_bytes_find_cstr(ft_bytes_t haystack, const char *needle); ft_inline bool ft_bytes_has_cstr(ft_bytes_t haystack, const char *needle); diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index 786538fed..398d407fb 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -340,6 +340,24 @@ ft_bytes_move(ft_bytes_t *dest, ft_bytes_t *src) { return len; } +ft_inline bool +ft_bytes_shift_to(ft_bytes_t *bytes, ft_bytes_t to) +{ + if (bytes->len < to.len) + return false; + memmove(to.ptr, bytes->ptr, to.len); + 
ft_bytes_consume(bytes, to.len); + return true; +} + +ft_inline void +ft_bytes_shift_must(ft_bytes_t *bytes, ft_bytes_t to) +{ + ft_dbg_assert(to.len <= bytes->len); + memmove(to.ptr, bytes->ptr, to.len); + ft_bytes_consume(bytes, to.len); +} + ft_inline bool ft_bytes_starts_with(ft_bytes_t haystack, ft_bytes_t needle) { From d0d06f3e91674c50a2dd945e17645455d4b18111 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 17 Dec 2022 14:52:13 +0300 Subject: [PATCH 208/339] fix ft_strbuf_ensure for zero-initialized ft_strbuf_t --- src/fu_util/impl/ft_impl.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/fu_util/impl/ft_impl.c b/src/fu_util/impl/ft_impl.c index 3239cbe25..10a6f4812 100644 --- a/src/fu_util/impl/ft_impl.c +++ b/src/fu_util/impl/ft_impl.c @@ -207,7 +207,12 @@ ft__strbuf_ensure(ft_strbuf_t *buf, size_t n) { buf->ptr = ft_realloc(buf->ptr, new_cap); else { char* newbuf = ft_malloc(new_cap); - memcpy(newbuf, buf->ptr, (size_t)buf->len+1); + if (buf->ptr != NULL) + memcpy(newbuf, buf->ptr, (size_t)buf->len+1); + else { + ft_assert(buf->len == 0); + newbuf[0] = '\0'; + } buf->ptr = newbuf; } buf->cap = new_cap-1; From 33f7c10d2d851d317ee0f6dbe7df1020171201b7 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 00:18:54 +0300 Subject: [PATCH 209/339] fix clang warning about format(gnu_printf) --- src/fu_util/ft_util.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index d3eacc6a0..d24d1614e 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -34,7 +34,11 @@ typedef SSIZE_T ssize_t; #define ft_gcc_malloc(free, idx) __attribute__((malloc)) #endif #define ft_unused __attribute__((unused)) +#if !defined(__clang__) #define ft_gnu_printf(fmt, arg) __attribute__((format(gnu_printf,fmt,arg))) +#else +#define ft_gnu_printf(fmt, arg) __attribute__((format(printf,fmt,arg))) +#endif #define ft_likely(x) __builtin_expect(!!(x), 1) #define ft_unlikely(x) 
__builtin_expect(!!(x), 0) #define ft_always_inline __attribute__((always_inline)) From bce53bbd3121ee346a2b576a6f9857cd6534a18c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 00:09:57 +0300 Subject: [PATCH 210/339] fu_util/fobj: make missing argument compilation error --- src/fu_util/fm_util.h | 5 ++- src/fu_util/fo_obj.h | 3 +- src/fu_util/impl/fo_impl.c | 29 ++---------- src/fu_util/impl/fo_impl.h | 90 +++++++++++++++----------------------- src/fu_util/test/obj1.c | 5 +-- 5 files changed, 45 insertions(+), 87 deletions(-) diff --git a/src/fu_util/fm_util.h b/src/fu_util/fm_util.h index 18a971aa7..ccd19f1a9 100644 --- a/src/fu_util/fm_util.h +++ b/src/fu_util/fm_util.h @@ -220,7 +220,7 @@ #define fm_foreach_tuple_arg_join_n(join1, join2, macro, arg, x, y, ...) \ join1() fm_apply(macro, arg, fm_expand x) \ join2() fm_apply(macro, arg, fm_expand y) \ - fm_recurs2(fm_foreach_tuple_arg_join_, fm_va_01n(__VA_ARGS__))(join1, join2, macro, arg, __VA_ARGS__) + fm_recurs2(fm_foreach_tuple_arg_join_, fm_va_01n(__VA_ARGS__))(join2, join2, macro, arg, __VA_ARGS__) #define fm_foreach_tuple_arg(macro, arg, ...) \ fm_foreach_tuple_arg_join(fm_empty, macro, arg, __VA_ARGS__) @@ -245,6 +245,9 @@ #define fm_eval_tuples_comma(macro, ...) \ fm_eval(fm_foreach_tuple_comma(macro, __VA_ARGS__)) +#define fm_eval_tuples_arg_comma(macro, arg, ...) 
\ + fm_eval(fm_foreach_tuple_arg_comma(macro, arg, __VA_ARGS__)) + #define fm__dumb_require_semicolon \ struct __dumb_struct_declaration_for_semicolon diff --git a/src/fu_util/fo_obj.h b/src/fu_util/fo_obj.h index 2144b8a38..f843820f4 100644 --- a/src/fu_util/fo_obj.h +++ b/src/fu_util/fo_obj.h @@ -370,8 +370,7 @@ fobj_klass(fobjBase); /* * fobjFormat should be defined for pretty printing */ -#define mth__fobjFormat void, (ft_strbuf_t*, out), (const char*, fmt) -#define mth__fobjFormat__optional() (fmt, NULL) +#define mth__fobjFormat void, (ft_strbuf_t*, out), (const char*, fmt, NULL) fobj_method(fobjFormat); /********************************* diff --git a/src/fu_util/impl/fo_impl.c b/src/fu_util/impl/fo_impl.c index 9b1dc609a..c6446e91d 100644 --- a/src/fu_util/impl/fo_impl.c +++ b/src/fu_util/impl/fo_impl.c @@ -260,32 +260,9 @@ fobj_method_implements(const fobj_t self, fobj_method_handle_t meth) { return false; } -extern void -fobj__validate_args(fobj_method_handle_t meth, - fobj_t self, - const char* const * paramnames, - const char *set, - size_t cnt) { - fobj_header_t *h; - fobj_klass_handle_t klass; - size_t i; - - ft_assert(fobj_global_state != FOBJ_RT_NOT_INITIALIZED); - ft_assert(meth > 0 && meth <= atload(&fobj_methods_n)); - ft_assert(meth != fobj__nm_mhandle(fobjDispose)()); - ft_assert(self != NULL, "call '%s' on NULL object", fobj_methods[meth].name); - - h = ((fobj_header_t*)self - 1); - assert(h->magic == FOBJ_HEADER_MAGIC); - klass = h->klass; - ft_dbg_assert(klass > 0 && klass <= atload(&fobj_klasses_n)); - - for (i = 0; i < cnt; i++) { - ft_assert(set[i] != 0, "Calling '%s' on '%s' miss argument '%s'", - fobj_methods[meth].name, - fobj_klasses[klass].name, - paramnames[i]); - } +_Noreturn +void fobj__validate_arg(const char* file, int line, const char *arg) { + ft_log(FT_FATAL, "%s:%d: missing argument %s", file, line, arg); } const char * diff --git a/src/fu_util/impl/fo_impl.h b/src/fu_util/impl/fo_impl.h index f75470328..76f353a91 100644 --- 
a/src/fu_util/impl/fo_impl.h +++ b/src/fu_util/impl/fo_impl.h @@ -63,7 +63,6 @@ typedef struct { #define fobj__nm_mhandle(meth) meth##__mh #define fobj__nm_do(meth) meth##__do #define fobj__nm_params_t(meth) meth##__params_t -#define fobj__nm_invoke(meth) meth##__invoke #define fobj__nm_impl_t(meth) meth##__impl #define fobj__nm_cb(meth) meth##__fetch_cb #define fobj__nm_cb_t(meth) meth##__cb_t @@ -99,7 +98,6 @@ typedef struct { fobj__nm_mhandle(meth), \ fobj__nm_params_t(meth), \ fobj__nm_do(meth), \ - fobj__nm_invoke(meth), \ fobj__nm_impl_t(meth), \ fobj__nm_cb(meth), \ fobj__nm_cb_t(meth), \ @@ -129,7 +127,7 @@ typedef struct { #define fobj__method_declare_impl(meth, handle, \ params_t, \ - meth_do, invoke_methparams, \ + meth_do, \ impl_meth_t, \ cb_meth, cb_meth_t, \ register_meth, wrap_decl, \ @@ -165,19 +163,7 @@ typedef struct { ft_inline ft_always_inline res \ meth(fobj_t self comma() fobj__mapArgs_toArgs(__VA_ARGS__)) { \ return meth_do(self, fobj_self_klass comma() fobj__mapArgs_toNames(__VA_ARGS__)); \ - } \ - \ - ft_inline ft_always_inline res \ - invoke_methparams(fobj_t self, fobj_klass_handle_t parent, params_t params) {\ - fobj__params_defaults(meth); \ - fm_when_isnt_empty(__VA_ARGS__)( \ - if (ft_unlikely(!(fobj__assertArgsAnd(__VA_ARGS__)))) { \ - const char * const params_s[] = { fobj__mapArgs_toNameStrs(__VA_ARGS__) }; \ - char set[] = {fobj__assertArgsVals(__VA_ARGS__)}; \ - fobj__validate_args(handle(), self, params_s, set, ft_arrsz(set)); \ - } ) \ - return meth_do(self, parent comma() fobj__mapArgs_toNamedParams(__VA_ARGS__)); \ - } \ + } #define fobj__method_common(meth, handle, impl_meth_t, register_meth, \ wrap_decl, comma, res, ...) \ @@ -216,35 +202,18 @@ typedef struct { fm_eval_tuples_comma(fobj__mapArgs_toNames_do, __VA_ARGS__) #define fobj__mapArgs_toNames_do(x, y, ...) y -#define fobj__mapArgs_toNameStrs(...) \ - fm_eval_tuples_comma(fobj__mapArgs_toNameStrs_do, __VA_ARGS__) -#define fobj__mapArgs_toNameStrs_do(x, y, ...) 
#y - -#define fobj__mapArgs_toNamedParams(...) \ - fm_eval_tuples_comma(fobj__mapArgs_toNamedParams_do, __VA_ARGS__) -#define fobj__mapArgs_toNamedParams_do(x, y, ...) params.y - -#define fobj__assertArgsAnd(...) \ - 1 fm_eval_tuples(fobj__assertArgsAnd_do, __VA_ARGS__) -#define fobj__assertArgsAnd_do(x, y, ...) & fobj__check_arg(params.y) - -#define fobj__assertArgsVals(...) \ - fm_eval_tuples_comma(fobj__assertArgsVals_do, __VA_ARGS__) -#define fobj__assertArgsVals_do(x, y, ...) fobj__check_arg(params.y) - -#define fobj__params_defaults(meth) \ - fobj__params_defaults_i(meth, fobj__nm_mthdflt(meth)()) -#define fobj__params_defaults_i(meth, ...) \ - fm_when(fm_is_tuple(fm_head(__VA_ARGS__))) ( \ - fobj__params_defaults_impl(__VA_ARGS__) \ - ) -#define fobj__params_defaults_impl(...) \ - fm_eval_tuples(fobj__params_defaults_each, __VA_ARGS__) -#define fobj__params_defaults_each(x, ...) \ - if (!fobj__check_arg(params.x)) { \ - fm_when_isnt_empty(__VA_ARGS__)( params.x = __VA_ARGS__; ) \ - params.fobj__nm_given(x) = fobj__dumb_arg; \ - } +#define fobj__mapArgs_toNamedParams(params, ...) \ + fm_eval_tuples_arg_comma(fobj__mapArgs_toNamedParams_do, params, __VA_ARGS__) +#define fobj__mapArgs_toNamedParams_do(params, x, y, ...) params.y + +#define fobj__params_defaultsEach(params, ...) \ + fm_eval_tuples_arg(fobj__params_defaultsEach_do, params, __VA_ARGS__) +#define fobj__params_defaultsEach_do(params, x, y, ...) \ + if (!fobj__check_arg(params.y)) { \ + fm_if(fm_is_empty(__VA_ARGS__), \ + fobj__validate_arg(__FILE__, __LINE__, #y), \ + params.y = __VA_ARGS__); \ + } /* Klass declarations */ @@ -433,11 +402,19 @@ typedef struct { /* Method invocation */ -#define fobj_call(meth, self, ...) \ - fobj__nm_invoke(meth)(self, fobj_self_klass, fobj_pass_params(meth, __VA_ARGS__)) +#define fobj_call(meth, self, ...) fobj__call_1(meth, self, fobj_self_klass, fm_uniq(params), __VA_ARGS__) +#define fobj__call_1(meth, self, parent, params, ...) 
({\ + ft_unused fobj__nm_params_t(meth) params = fobj_pass_params(meth, __VA_ARGS__); \ + fobj__call_2(meth, (self), parent, params, fobj__nm_mth(meth)) \ + }) +#define fobj__call_2(meth, self, parent, params, ...) \ + fobj__call_3(meth, self, parent, params, __VA_ARGS__) +#define fobj__call_3(meth, self, parent, params, res, ...) \ + fobj__params_defaultsEach(params, __VA_ARGS__); \ + fobj__nm_do(meth)(self, parent fm_va_comma(__VA_ARGS__) fobj__mapArgs_toNamedParams(params, __VA_ARGS__)); #define fobj_call_super(meth, _klassh, self, ...) \ - fobj__nm_invoke(meth)(self, _klassh, fobj_pass_params(meth, __VA_ARGS__)) + fobj__call_1(meth, self, _klassh, fm_uniq(params), __VA_ARGS__) #define fobj_iface_call(meth, iface, ...) \ fobj_call(meth, (fobj_t)(iface).fobj__nm_has(meth), __VA_ARGS__) @@ -453,14 +430,14 @@ typedef struct { #define fobj_ifdef(assignment, meth, self, ...) \ fobj__ifdef_impl(assignment, meth, (self), \ - fm_uniq(cb), fm_uniq(_self), fobj__nm_cb(meth), fobj__nm_cb_t(meth), \ - fobj__nm_invoke(meth), __VA_ARGS__) -#define fobj__ifdef_impl(assignment, meth, self_, cb, self, cb_meth, cb_meth_t, \ - invoke_meth__params, ...) ({ \ + fm_uniq(cb), fm_uniq(_self), fobj__nm_cb(meth), \ + fobj__nm_cb_t(meth), __VA_ARGS__) +#define fobj__ifdef_impl(assignment, meth, self_, cb, self, \ + cb_meth, cb_meth_t, ...) 
({ \ fobj_t self = (self_); \ cb_meth_t cb = cb_meth(self, fobj_self_klass, false); \ if (cb.impl != NULL) { \ - assignment invoke_meth__params(self, fobj_self_klass, fobj_pass_params(meth, __VA_ARGS__)); \ + assignment fobj_call(meth, self, __VA_ARGS__); \ } \ cb.impl != NULL; \ }) @@ -508,8 +485,11 @@ extern bool fobj_method_implements(fobj_t self, extern void* fobj_klass_method_search(fobj_klass_handle_t klass, fobj_method_handle_t meth); -extern void fobj__validate_args(fobj_method_handle_t meth, fobj_t self, - const char* const * paramnames, const char *set, size_t cnt); +extern _Noreturn +#if __OPTIMIZE__ || defined(__clang__) +__attribute__((error("missing argument"))) +#endif +void fobj__validate_arg(const char* file, int line, const char *arg); /* Variable set helpers */ diff --git a/src/fu_util/test/obj1.c b/src/fu_util/test/obj1.c index faa7aafa5..e47115048 100644 --- a/src/fu_util/test/obj1.c +++ b/src/fu_util/test/obj1.c @@ -8,8 +8,7 @@ static int verbose = 0; #define logf(...) ft_log(FT_DEBUG, __VA_ARGS__) -#define mth__ioRead ssize_t, (void *, buf), (size_t, count) -#define mth__ioRead__optional() (count, 4) +#define mth__ioRead ssize_t, (void *, buf), (size_t, count, 4) #define mth__ioClose int #define mth__ioStatus int #define mth__fobjGetError err_i @@ -206,7 +205,7 @@ int main(int argc, char** argv) { ft_assert(i != ioStatus(a)); ft_assert($ifdef(i =, ioStatus, a)); ft_assert(i == ioStatus(a)); - ft_assert(!$ifdef(,fobjFormat, a)); + ft_assert(!$ifdef(,fobjFormat, a, NULL)); err = $(fobjGetError, a); logf("Error: %s", $errmsg(err)); From 6bfdc0b0248d12c41733546816c9fe0c1ccc5e70 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 01:31:31 +0300 Subject: [PATCH 211/339] adapt fobj missing argument compile-time error generation And it actually found error in data.c. 
--- src/data.c | 1 + src/utils/file.h | 26 ++++++++++---------------- 2 files changed, 11 insertions(+), 16 deletions(-) diff --git a/src/data.c b/src/data.c index 3fa646936..35eece9f4 100644 --- a/src/data.c +++ b/src/data.c @@ -1240,6 +1240,7 @@ create_empty_file(const char *to_root, fio_location to_location, pgFile *file) * use case */ fl = $i(pioOpenRewrite, drive, + .path = to_path, .permissions = file->mode, .use_temp = false, .err = &err); diff --git a/src/utils/file.h b/src/utils/file.h index e5259991a..9167d5208 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -252,22 +252,18 @@ fobj_iface(pioPagesIterator); // Drive #define mth__pioOpenRead pioReader_i, (path_t, path), (err_i *, err) #define mth__pioOpenReadStream pioReadStream_i, (path_t, path), (err_i *, err) -#define mth__pioOpenWrite pioDBWriter_i, (path_t, path), (int, permissions), \ - (bool, exclusive), (bool, sync), \ +#define mth__pioOpenWrite pioDBWriter_i, (path_t, path), (int, permissions, FILE_PERMISSION), \ + (bool, exclusive, false), (bool, sync, false), \ (err_i *, err) -#define mth__pioOpenWrite__optional() (exclusive, false), (sync, false), (permissions, FILE_PERMISSION) -#define mth__pioOpenRewrite pioWriteCloser_i, (path_t, path), (int, permissions), \ - (bool, binary), (bool, use_temp), \ - (bool, sync), (err_i *, err) -#define mth__pioOpenRewrite__optional() (binary, true), (use_temp, true), \ - (sync, false), (permissions, FILE_PERMISSION) +#define mth__pioOpenRewrite pioWriteCloser_i, (path_t, path), (int, permissions, FILE_PERMISSION), \ + (bool, binary, true), (bool, use_temp, true), \ + (bool, sync, false), (err_i *, err) #define mth__pioStat pio_stat_t, (path_t, path), (bool, follow_symlink), \ (err_i *, err) #define mth__pioRemove err_i, (path_t, path), (bool, missing_ok) #define mth__pioRename err_i, (path_t, old_path), (path_t, new_path) -#define mth__pioExists bool, (path_t, path), (pio_file_kind_e, expected_kind), \ +#define mth__pioExists bool, (path_t, path), 
(pio_file_kind_e, expected_kind, PIO_KIND_REGULAR), \ (err_i *, err) -#define mth__pioExists__optional() (expected_kind, PIO_KIND_REGULAR) #define mth__pioGetCRC32 pg_crc32, (path_t, path), (bool, compressed), \ (err_i *, err) /* Compare, that filename1 and filename2 is the same file */ @@ -280,11 +276,10 @@ fobj_iface(pioPagesIterator); #define mth__pioRemoveDir void, (const char *, root), (bool, root_as_well) /* pioReadFile and pioWriteFile should be used only for small files */ #define PIO_READ_WRITE_FILE_LIMIT (16*1024*1024) -#define mth__pioReadFile ft_bytes_t, (path_t, path), (bool, binary), \ +#define mth__pioReadFile ft_bytes_t, (path_t, path), (bool, binary, true), \ (err_i *, err) -#define mth__pioReadFile__optional() (binary, true) -#define mth__pioWriteFile err_i, (path_t, path), (ft_bytes_t, content), (bool, binary) -#define mth__pioWriteFile__optional() (binary, true) +#define mth__pioWriteFile err_i, (path_t, path), (ft_bytes_t, content), \ + (bool, binary, true) #define mth__pioIteratePages pioPagesIterator_i, (path_t, path), \ (int, segno), (datapagemap_t, pagemap), (XLogRecPtr, start_lsn), \ @@ -345,8 +340,7 @@ doIteratePages_impl(pioIteratePages_i drive, struct doIteratePages_params p); .just_validate = false, \ __VA_ARGS__})) -#define mth__pioSetAsync err_i, (bool, async) -#define mth__pioSetAsync__optional() (async, true) +#define mth__pioSetAsync err_i, (bool, async, true) #define mth__pioAsyncRead size_t, (ft_bytes_t, buf), (err_i*, err) #define mth__pioAsyncWrite size_t, (ft_bytes_t, buf), (err_i*, err) #define mth__pioAsyncError err_i From 04c3159edfbede5ad3ff18157d0be2847f12c7a1 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 04:53:41 +0300 Subject: [PATCH 212/339] ft_utils: more useful staff. 
--- src/fu_util/ft_util.h | 12 ++++++++++++ src/fu_util/impl/ft_impl.h | 28 ++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index d24d1614e..e4778ecf2 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -358,6 +358,8 @@ ft_inline bool ft_bytes_has_cstr(ft_bytes_t haystack, const char *needle); ft_inline bool ft_bytes_starts_with(ft_bytes_t haystack, ft_bytes_t needle); ft_inline bool ft_bytes_starts_withc(ft_bytes_t haystack, const char* needle); +ft_inline bool ft_bytes_ends_with(ft_bytes_t haystack, ft_bytes_t needle); +ft_inline bool ft_bytes_ends_withc(ft_bytes_t haystack, const char* needle); ft_inline size_t ft_bytes_spn(ft_bytes_t bytes, ft_bytes_t chars); ft_inline size_t ft_bytes_notspn(ft_bytes_t bytes, ft_bytes_t chars); @@ -407,6 +409,12 @@ ft_inline ft_str_t ft_strdup_bytes(ft_bytes_t bytes); /* use only if string was allocated */ ft_inline void ft_str_free(ft_str_t *str); +ft_inline ft_str_t ft_str_steal(ft_str_t *str) { + ft_str_t res = *str; + *str = ft_str(NULL, 0); + return res; +} + /* print string into ft_malloc-ed buffer */ extern ft_str_t ft_asprintf(const char *fmt, ...) ft_gnu_printf(1,2); extern ft_str_t ft_vasprintf(const char *fmt, va_list args) ft_gnu_printf(1,0); @@ -418,6 +426,10 @@ ft_inline FT_CMP_RES ft_strcmpc(ft_str_t str, const char* oth); ft_inline void ft_str_consume(ft_str_t *str, size_t cut); +ft_inline size_t ft_str_spnc(ft_str_t str, const char* chars); +ft_inline size_t ft_str_ends_withc(ft_str_t str, const char* needle); +ft_inline size_t ft_str_find_cstr(ft_str_t haystack, const char *needle); + /* shift zero-terminated string. 
Will assert if no zero-byte found and it is not last */ extern ft_str_t ft_bytes_shift_zt(ft_bytes_t *bytes); diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index 398d407fb..9cf4a06cc 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -371,6 +371,19 @@ ft_bytes_starts_withc(ft_bytes_t haystack, const char* needle) return ft_bytes_starts_with(haystack, ft_bytesc(needle)); } +ft_inline bool +ft_bytes_ends_with(ft_bytes_t haystack, ft_bytes_t needle) +{ + return haystack.len >= needle.len && + memcmp(haystack.ptr + haystack.len - needle.len, needle.ptr, needle.len) == 0; +} + +ft_inline bool +ft_bytes_ends_withc(ft_bytes_t haystack, const char* needle) +{ + return ft_bytes_ends_with(haystack, ft_bytesc(needle)); +} + ft_inline size_t ft_bytes_find_cstr(ft_bytes_t haystack, const char* needle) { @@ -476,6 +489,21 @@ ft_str_consume(ft_str_t *str, size_t cut) { str->len -= cut; } +ft_inline size_t +ft_str_spnc(ft_str_t str, const char* chars) { + return ft_bytes_spnc(ft_str2bytes(str), chars); +} + +ft_inline size_t +ft_str_ends_withc(ft_str_t str, const char* needle) { + return ft_bytes_ends_withc(ft_str2bytes(str), needle); +} + +ft_inline size_t +ft_str_find_cstr(ft_str_t haystack, const char *needle) { + return ft_bytes_find_cstr(ft_str2bytes(haystack), needle); +} + ft_inline ft_strbuf_t ft_strbuf_zero(void) { return (ft_strbuf_t){.ptr = "", .len = 0, .cap = 0}; From 133c2275d5de0b1b4ff0a9c62f044fd21e263944 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 16:37:53 +0300 Subject: [PATCH 213/339] fix cleanup_tablespace: it is for DB, not for BACKUP --- src/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dir.c b/src/dir.c index 62deaa03b..82af5eaea 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1568,7 +1568,7 @@ read_database_map(pgBackup *backup) void cleanup_tablespace(const char *path) { - pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); + pioDrive_i drive = 
pioDriveForLocation(FIO_DB_HOST); $i(pioRemoveDir, drive, .root = path, .root_as_well = false); } From 5341b9d0a759dfa46cd9c6d6c9cf86f8c12d2b31 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 18:26:05 +0300 Subject: [PATCH 214/339] remove pioAsyncWrite/pioAsyncError --- src/utils/file.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/utils/file.h b/src/utils/file.h index 9167d5208..3d11abdfb 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -342,12 +342,8 @@ doIteratePages_impl(pioIteratePages_i drive, struct doIteratePages_params p); #define mth__pioSetAsync err_i, (bool, async, true) #define mth__pioAsyncRead size_t, (ft_bytes_t, buf), (err_i*, err) -#define mth__pioAsyncWrite size_t, (ft_bytes_t, buf), (err_i*, err) -#define mth__pioAsyncError err_i fobj_method(pioSetAsync); fobj_method(pioAsyncRead); -fobj_method(pioAsyncWrite); -fobj_method(pioAsyncError); // Filter typedef struct pioFltTransformResult { From d2843d0a11cbfdf722ea49949873e79d69a6f92e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 21:07:39 +0300 Subject: [PATCH 215/339] fix for fio_receive_pio_err --- src/utils/file.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/utils/file.c b/src/utils/file.c index c4081f8e8..327776d3a 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -973,7 +973,11 @@ fio_receive_pio_err(fio_header *hdr) IO_CHECK(fio_read_all(fio_stdin, err_msg, hdr->size), hdr->size); - return $syserr(pio_errno, err_msg); + if (pio_errno) + return $err(SysErr, "(remote) {causeStr}", + causeStr(err_msg), errNo(pio_errno)); + + return $err(RT, "(remote) {causeStr}", causeStr(err_msg)); } static void From abdfdd27770b19cd19f905bf1084a076e8a2be11 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 19:43:59 +0300 Subject: [PATCH 216/339] ft_strbuf_cat_path --- src/utils/file.c | 42 ++++++++++++++++++++++++++++++++++++++++++ src/utils/file.h | 8 ++++++++ 2 files changed, 50 insertions(+) diff 
--git a/src/utils/file.c b/src/utils/file.c index 327776d3a..b544947a2 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5869,6 +5869,48 @@ prepare_page(pioLocalPagesIterator *iter, BlockNumber blknum, Page page, PageSta return PageIsOk; } +/* + * skip_drive + * + * On Windows, a path may begin with "C:" or "//network/". Advance over + * this and point to the effective start of the path. + * + * (copied from PostgreSQL's src/port/path.c) + */ +#ifdef WIN32 + +static char * +skip_drive(const char *path) +{ + if (IS_DIR_SEP(path[0]) && IS_DIR_SEP(path[1])) + { + path += 2; + while (*path && !IS_DIR_SEP(*path)) + path++; + } + else if (isalpha((unsigned char) path[0]) && path[1] == ':') + { + path += 2; + } + return (char *) path; +} +#else +#define skip_drive(path) (path) +#endif + +bool +ft_strbuf_cat_path(ft_strbuf_t *buf, ft_str_t path) +{ + /* here we repeat join_path_components */ + if (buf->len > 0 && !IS_DIR_SEP(buf->ptr[buf->len-1])) + { + if (*(skip_drive(buf->ptr)) != '\0') + if (!ft_strbuf_cat1(buf, '/')) + return false; + } + + return ft_strbuf_cat(buf, path); +} fobj_klass_handle(pioLocalPagesIterator); fobj_klass_handle(pioRemotePagesIterator); diff --git a/src/utils/file.h b/src/utils/file.h index 3d11abdfb..8f2b1d625 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -414,4 +414,12 @@ extern void init_pio_line_reader(pio_line_reader *r, pioRead_i source, size_t ma extern void deinit_pio_line_reader(pio_line_reader *r); extern ft_bytes_t pio_line_reader_getline(pio_line_reader *r, err_i *err); +/* append path component */ +extern bool ft_strbuf_cat_path(ft_strbuf_t *buf, ft_str_t path); +ft_inline bool +ft_strbuf_cat_pathc(ft_strbuf_t *buf, const char *path) +{ + return ft_strbuf_cat_path(buf, ft_cstr(path)); +} + #endif From e79e5f07288a0282bd7a6cb3072c3224025db6cb Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 17 Dec 2022 01:45:27 +0300 Subject: [PATCH 217/339] [PBCKP-416] pioOpenDir + pioDirNext Looks like pioListDir were no so 
great idea. Step to return to iterator style. --- src/utils/file.c | 308 ++++++++++++++++++++++++++++++++++++++++++++++- src/utils/file.h | 21 +++- 2 files changed, 323 insertions(+), 6 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index b544947a2..dc027cd81 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -23,6 +23,8 @@ static char *async_errormsg = NULL; #define PAGE_ZEROSEARCH_FINE_GRANULARITY 64 static const char zerobuf[PAGE_ZEROSEARCH_COARSE_GRANULARITY] = {0}; +#define PIO_DIR_REMOTE_BATCH 100 + fio_location MyLocation; typedef struct @@ -2736,6 +2738,61 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); break; } + case PIO_DIR_OPEN: + { + ft_assert(hdr.handle >= 0 && hdr.handle < FIO_FDMAX); + ft_assert(objs[hdr.handle] == NULL); + pioDirIter_i iter; + err_i err; + + iter = $i(pioOpenDir, drive, buf, .err = &err); + if ($haserr(err)) + fio_send_pio_err(out, err); + else + { + objs[hdr.handle] = $ref(iter.self); + hdr.size = 0; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + } + break; + } + case PIO_DIR_NEXT: + { + ft_assert(hdr.handle >= 0 && hdr.handle < FIO_FDMAX); + ft_assert(objs[hdr.handle] != NULL); + ft_strbuf_t stats = ft_strbuf_zero(); + ft_strbuf_t names = ft_strbuf_zero(); + pio_dirent_t dirent; + int n; + + for (n = 0; n < PIO_DIR_REMOTE_BATCH; n++) + { + dirent = $(pioDirNext, objs[hdr.handle], .err = &err); + if ($haserr(err)) + break; + if (dirent.stat.pst_kind == PIO_KIND_UNKNOWN) + break; + ft_strbuf_catbytes(&stats, FT_BYTES_FOR(dirent.stat)); + ft_strbuf_cat_zt(&names, dirent.name); + } + + if ($haserr(err)) + fio_send_pio_err(out, err); + else + { + hdr.arg = n; + hdr.size = stats.len + names.len; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + if (n > 0) + { + IO_CHECK(fio_write_all(out, stats.ptr, stats.len), stats.len); + IO_CHECK(fio_write_all(out, names.ptr, names.len), names.len); + } + } + ft_strbuf_free(&stats); + 
ft_strbuf_free(&names); + break; + } case PIO_CLOSE: { err_i err; @@ -2827,6 +2884,7 @@ typedef struct pioLocalDir { ft_str_t path; DIR* dir; + ft_strbuf_t name_buf; } pioLocalDir; #define kls__pioLocalDir iface__pioDirIter, iface(pioDirIter), mth(fobjDispose) fobj_klass(pioLocalDir); @@ -2857,6 +2915,21 @@ typedef struct pioRemoteWriteFile { iface(pioWriteCloser, pioDBWriter) fobj_klass(pioRemoteWriteFile); +#define FT_SLICE dirent +#define FT_SLICE_TYPE pio_dirent_t +#include + +typedef struct pioRemoteDir +{ + ft_str_t path; + int handle; + int pos; + ft_bytes_t names_buf; + ft_arr_dirent_t entries; +} pioRemoteDir; +#define kls__pioRemoteDir iface__pioDirIter, iface(pioDirIter), mth(fobjDispose) +fobj_klass(pioRemoteDir); + typedef struct pioReadFilter { pioRead_i wrapped; pioFilter_i filter; @@ -3241,6 +3314,26 @@ pioLocalDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_tab skip_hidden, external_dir_num, $bind(pioDBDrive, self)); } +static pioDirIter_i +pioLocalDrive_pioOpenDir(VSelf, path_t path, err_i* err) +{ + Self(pioLocalDrive); + DIR* dir; + fobj_reset_err(err); + + dir = opendir(path); + if (dir == NULL) + { + *err = $syserr(errno, "Cannot open dir {path:q}", path(path)); + return $null(pioDirIter); + } + + return $bind(pioDirIter, + $alloc(pioLocalDir, + .path = ft_strdupc(path), + .dir = dir)); +} + static void pioLocalDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { FOBJ_FUNC_ARP(); @@ -3635,6 +3728,82 @@ pioLocalWriteFile_fobjDispose(VSelf) ft_bytes_free(&self->buf); } +static pio_dirent_t +pioLocalDir_pioDirNext(VSelf, err_i* err) +{ + Self(pioLocalDir); + struct dirent* ent; + pio_dirent_t entry = {.stat={.pst_kind=PIO_KIND_UNKNOWN}}; + char path[MAXPGPATH]; + fobj_reset_err(err); + + ft_assert(self->dir != NULL, "Abuse closed dir"); + + ft_strbuf_reset_for_reuse(&self->name_buf); + + for (;;) + { + errno = 0; + ent = readdir(self->dir); + if (ent == NULL && errno != 0) + *err = $syserr(errno, "Could not read 
dir {path:q}", + path(self->path.ptr)); + if (ent == NULL) + return entry; + + /* Skip '.', '..' and all hidden files as well */ + if (ent->d_name[0] == '.') + continue; + + join_path_components(path, self->path.ptr, ent->d_name); + entry.stat = $i(pioStat, localDrive, path, true, .err = err); + if ($haserr(*err)) + return entry; + + /* + * Add only files, directories and links. Skip sockets and other + * unexpected file formats. + */ + if (entry.stat.pst_kind != PIO_KIND_DIRECTORY && + entry.stat.pst_kind != PIO_KIND_REGULAR) + { + elog(WARNING, "Skip '%s': unexpected file kind %s", path, + pio_file_kind2str(entry.stat.pst_kind, path)); + continue; + } + + ft_strbuf_catc(&self->name_buf, ent->d_name); + entry.name = ft_strbuf_ref(&self->name_buf); + return entry; + } +} + +static err_i +pioLocalDir_pioClose(VSelf) +{ + Self(pioLocalDir); + int rc; + + rc = closedir(self->dir); + self->dir = NULL; + if (rc) + return $syserr(errno, "Could not close dir {path:q}", + path(self->path.ptr)); + return $noerr(); +} + +static void +pioLocalDir_fobjDispose(VSelf) +{ + Self(pioLocalDir); + + if (self->dir) + closedir(self->dir); + self->dir = NULL; + ft_str_free(&self->path); + ft_strbuf_free(&self->name_buf); +} + /* REMOTE DRIVE */ static pioReader_i @@ -3727,9 +3896,8 @@ pioRemoteDrive_pioFilesAreSame(VSelf, path_t file1, path_t file2) }; char _buf[512]; ft_strbuf_t buf = ft_strbuf_init_stack(_buf, sizeof(_buf)); - ft_strbuf_catc(&buf, file1); - ft_strbuf_cat1(&buf, '\x00'); - ft_strbuf_catc(&buf, file2); + ft_strbuf_catc_zt(&buf, file1); + ft_strbuf_catc_zt(&buf, file2); hdr.size = buf.len + 1; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); @@ -3923,6 +4091,35 @@ pioRemoteDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_ta pg_free(buf); } +static pioDirIter_i +pioRemoteDrive_pioOpenDir(VSelf, path_t path, err_i* err) +{ + Self(pioRemoteDrive); + fio_header hdr = { + .cop = PIO_DIR_OPEN, + .handle = find_free_handle(), + .size = 
strlen(path)+1, + }; + fobj_reset_err(err); + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); + + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + if (hdr.cop == FIO_PIO_ERROR) + { + *err = fio_receive_pio_err(&hdr); + return $null(pioDirIter); + } + ft_assert(hdr.cop == PIO_DIR_OPEN); + set_handle(hdr.handle); + return $bind(pioDirIter, + $alloc(pioRemoteDir, + .path = ft_strdupc(path), + .handle = hdr.handle, + .pos = 0)); +} + static void pioRemoteDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { FOBJ_FUNC_ARP(); @@ -4565,6 +4762,109 @@ pioRemoteWriteFile_fobjDispose(VSelf) ft_str_free(&self->path); } +static pio_dirent_t +pioRemoteDir_pioDirNext(VSelf, err_i *err) +{ + Self(pioRemoteDir); + fio_header hdr; + pio_dirent_t entry = {.stat={.pst_kind=PIO_KIND_UNKNOWN}}; + ft_bytes_t tofree = ft_bytes(NULL, 0); + ft_bytes_t buf; + int n; + fobj_reset_err(err); + + ft_assert(self->handle >= 0, "Abuse closed dir"); + + if (self->pos == self->entries.len) + { + ft_bytes_free(&self->names_buf); + ft_arr_dirent_reset_for_reuse(&self->entries); + self->pos = 0; + + hdr = (fio_header){ + .cop = PIO_DIR_NEXT, + .handle = self->handle, + }; + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + + if (hdr.cop == FIO_PIO_ERROR) + { + *err = fio_receive_pio_err(&hdr); + return entry; + } + ft_assert(hdr.cop == PIO_DIR_NEXT); + + if (hdr.arg == 0) + { + /* End of iteration */ + return entry; + } + + buf = ft_bytes_alloc(hdr.size); + tofree = buf; + IO_CHECK(fio_read_all(fio_stdin, buf.ptr, buf.len), buf.len); + + for (n = 0; n < hdr.arg; n++) + { + ft_bytes_shift_must(&buf, FT_BYTES_FOR(entry.stat)); + ft_arr_dirent_push(&self->entries, entry); + } + + self->names_buf = ft_bytes_dup(buf); + buf = self->names_buf; + + for (n = 0; n < self->entries.len; n++) + 
self->entries.ptr[n].name = ft_bytes_shift_zt(&buf); + + ft_bytes_free(&tofree); + } + + entry = self->entries.ptr[self->pos]; + self->pos++; + return entry; +} + +static err_i +pioRemoteDir_pioClose(VSelf) +{ + Self(pioRemoteDir); + err_i err = $noerr(); + fio_header hdr = {.cop = PIO_CLOSE, .handle = self->handle }; + + ft_assert(self->handle >= 0); + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + + unset_handle(self->handle); + self->handle = -1; + + if (hdr.cop == FIO_PIO_ERROR) + err = fobj_err_combine(err, fio_receive_pio_err(&hdr)); + return err; +} + +static void +pioRemoteDir_fobjDispose(VSelf) +{ + Self(pioRemoteDir); + + if (self->handle >= 0) + { + fio_header hdr = { + .cop = PIO_DISPOSE, + .handle = self->handle, + }; + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + unset_handle(self->handle); + } + ft_str_free(&self->path); + ft_bytes_free(&self->names_buf); + ft_arr_dirent_reset_for_reuse(&self->entries); +} + pioRead_i pioWrapReadFilter(pioRead_i fl, pioFilter_i flt, size_t buf_size) { @@ -5922,6 +6222,8 @@ fobj_klass_handle(pioLocalFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioRemoteFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioLocalWriteFile); fobj_klass_handle(pioRemoteWriteFile); +fobj_klass_handle(pioLocalDir); +fobj_klass_handle(pioRemoteDir); fobj_klass_handle(pioWriteFilter, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioReadFilter, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioDevNull); diff --git a/src/utils/file.h b/src/utils/file.h index 8f2b1d625..dc3c82b16 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -70,6 +70,8 @@ typedef enum PIO_SEEK, PIO_TRUNCATE, PIO_GET_ASYNC_ERROR, + PIO_DIR_OPEN, + PIO_DIR_NEXT, PIO_CLOSE, PIO_DISPOSE, } fio_operations; @@ -108,12 +110,17 @@ typedef enum pio_file_kind { } pio_file_kind_e; typedef struct pio_stat { + 
pio_file_kind_e pst_kind; + uint32_t pst_mode; int64_t pst_size; int64_t pst_mtime; - uint32_t pst_mode; - pio_file_kind_e pst_kind; } pio_stat_t; +typedef struct pio_dirent { + pio_stat_t stat; + ft_str_t name; +} pio_dirent_t; + extern fio_location MyLocation; extern void setMyLocation(ProbackupSubcmd const subcmd); @@ -229,6 +236,12 @@ fobj_iface(pioWriteCloser); fobj_iface(pioDBWriter); fobj_iface(pioReadCloser); +// DIR +#define mth__pioDirNext pio_dirent_t, (err_i*, err) +fobj_method(pioDirNext); +#define iface__pioDirIter mth(pioDirNext, pioClose) +fobj_iface(pioDirIter); + // Pages iterator typedef struct { @@ -270,6 +283,7 @@ fobj_iface(pioPagesIterator); #define mth__pioFilesAreSame bool, (path_t, file1), (path_t, file2) #define mth__pioIsRemote bool #define mth__pioMakeDir err_i, (path_t, path), (mode_t, mode), (bool, strict) +#define mth__pioOpenDir pioDirIter_i, (path_t, path), (err_i*, err) #define mth__pioListDir void, (parray *, files), (const char *, root), \ (bool, handle_tablespaces), (bool, symlink_and_hidden), \ (bool, backup_logs), (bool, skip_hidden), (int, external_dir_num) @@ -298,6 +312,7 @@ fobj_method(pioIsRemote); fobj_method(pioGetCRC32); fobj_method(pioMakeDir); fobj_method(pioFilesAreSame); +fobj_method(pioOpenDir); fobj_method(pioListDir); fobj_method(pioRemoveDir); fobj_method(pioReadFile); @@ -307,7 +322,7 @@ fobj_method(pioIteratePages); #define iface__pioDrive mth(pioOpenRead, pioOpenReadStream), \ mth(pioStat, pioRemove), \ mth(pioExists, pioGetCRC32, pioIsRemote), \ - mth(pioMakeDir, pioListDir, pioRemoveDir), \ + mth(pioMakeDir, pioOpenDir, pioListDir, pioRemoveDir), \ mth(pioFilesAreSame, pioReadFile, pioWriteFile), \ mth(pioOpenRewrite) fobj_iface(pioDrive); From 17badb0c26683d7bcd50fa72b75e73ae187d2bae Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 17 Dec 2022 14:25:51 +0300 Subject: [PATCH 218/339] Revert "PBCKP-415 use pio for catalog_get_backup_list" This reverts commit f0215bbcc4882b0381980bb0adbb46ff663710af. 
--- src/catalog.c | 80 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 57 insertions(+), 23 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index acff0f30b..d6d69a524 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -802,6 +802,27 @@ pgBackupGetBackupMode(pgBackup *backup, bool show_color) return backupModes[backup->backup_mode]; } +static bool +IsDir(const char *dirpath, const char *entry, fio_location location) +{ + FOBJ_FUNC_ARP(); + char path[MAXPGPATH]; + pio_stat_t st; + err_i err; + + join_path_components(path, dirpath, entry); + + st = $i(pioStat, pioDriveForLocation(location), + .path = path, .follow_symlink = false, .err = &err); + if ($haserr(err)) + { + ft_logerr(FT_WARNING, $errmsg(err), "IsDir"); + return false; + } + + return st.pst_kind == PIO_KIND_DIRECTORY; +} + /* * Create list of instances in given backup catalog. * @@ -872,35 +893,35 @@ catalog_get_instance_list(CatalogState *catalogState) parray * catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id) { + DIR *data_dir = NULL; + struct dirent *data_ent = NULL; parray *backups = NULL; - parray *files = NULL; int i; - files = parray_new(); - backup_list_dir(files, instanceState->instance_backup_subdir_path); + /* open backup instance backups directory */ + data_dir = fio_opendir(FIO_BACKUP_HOST, instanceState->instance_backup_subdir_path); + if (data_dir == NULL) + { + elog(WARNING, "cannot open directory \"%s\": %s", instanceState->instance_backup_subdir_path, + strerror(errno)); + goto err_proc; + } /* scan the directory and list backups */ backups = parray_new(); - for(i = 0; i < parray_num(files); ++i) + for (; (data_ent = fio_readdir(data_dir)) != NULL; errno = 0) { char backup_conf_path[MAXPGPATH]; char data_path[MAXPGPATH]; - char backup_dir_name[MAXPGPATH]; - pgFile *file = (pgFile *) parray_get(files, i); pgBackup *backup = NULL; - char *slash; - - if (strcmp(file->name, BACKUP_CONTROL_FILE) != 0) - continue; - slash = 
strchr(file->rel_path, '/'); - if(!slash) + /* skip not-directory entries and hidden entries */ + if (!IsDir(instanceState->instance_backup_subdir_path, data_ent->d_name, FIO_BACKUP_HOST) + || data_ent->d_name[0] == '.') continue; - memcpy(backup_dir_name, file->rel_path, slash - file->rel_path); - backup_dir_name[slash - file->rel_path] = 0; /* open subdirectory of specific backup */ - join_path_components(data_path, instanceState->instance_backup_subdir_path, backup_dir_name); + join_path_components(data_path, instanceState->instance_backup_subdir_path, data_ent->d_name); /* read backup information from BACKUP_CONTROL_FILE */ join_path_components(backup_conf_path, data_path, BACKUP_CONTROL_FILE); @@ -910,12 +931,12 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id { backup = pgut_new0(pgBackup); pgBackupInit(backup, instanceState->backup_location); - backup->start_time = base36dec(backup_dir_name); + backup->start_time = base36dec(data_ent->d_name); /* XXX BACKUP_ID change it when backup_id wouldn't match start_time */ Assert(backup->backup_id == 0 || backup->backup_id == backup->start_time); backup->backup_id = backup->start_time; } - else if (strcmp(backup_id_of(backup), backup_dir_name) != 0) + else if (strcmp(backup_id_of(backup), data_ent->d_name) != 0) { /* TODO there is no such guarantees */ elog(WARNING, "backup ID in control file \"%s\" doesn't match name of the backup folder \"%s\"", @@ -923,6 +944,7 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id } backup->root_dir = pgut_strdup(data_path); + backup->database_dir = pgut_malloc(MAXPGPATH); join_path_components(backup->database_dir, backup->root_dir, DATABASE_DIR); @@ -939,15 +961,16 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id parray_append(backups, backup); } - parray_walk(files, pgFileFree); - parray_free(files); - - if (parray_num(backups) == 0) + if (errno) { - elog(WARNING, "Cannot find any 
backups in \"%s\"", - instanceState->instance_backup_subdir_path); + elog(WARNING, "Cannot read backup root directory \"%s\": %s", + instanceState->instance_backup_subdir_path, strerror(errno)); + goto err_proc; } + fio_closedir(data_dir); + data_dir = NULL; + parray_qsort(backups, pgBackupCompareIdDesc); /* Link incremental backups with their ancestors.*/ @@ -968,6 +991,17 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id } return backups; + +err_proc: + if (data_dir) + fio_closedir(data_dir); + if (backups) + parray_walk(backups, pgBackupFree); + parray_free(backups); + + elog(ERROR, "Failed to get backup list"); + + return NULL; } /* From 2df87a0e146f997080812e2dc74e47e0c9f3cda8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 17 Dec 2022 15:02:41 +0300 Subject: [PATCH 219/339] [PBCKP-416] catalog_get_backup_list - use pioOpenDir/pioDirNext --- src/catalog.c | 69 +++++++++++++++++++-------------------------------- 1 file changed, 26 insertions(+), 43 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index d6d69a524..130ad9872 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -802,27 +802,6 @@ pgBackupGetBackupMode(pgBackup *backup, bool show_color) return backupModes[backup->backup_mode]; } -static bool -IsDir(const char *dirpath, const char *entry, fio_location location) -{ - FOBJ_FUNC_ARP(); - char path[MAXPGPATH]; - pio_stat_t st; - err_i err; - - join_path_components(path, dirpath, entry); - - st = $i(pioStat, pioDriveForLocation(location), - .path = path, .follow_symlink = false, .err = &err); - if ($haserr(err)) - { - ft_logerr(FT_WARNING, $errmsg(err), "IsDir"); - return false; - } - - return st.pst_kind == PIO_KIND_DIRECTORY; -} - /* * Create list of instances in given backup catalog. 
* @@ -893,35 +872,43 @@ catalog_get_instance_list(CatalogState *catalogState) parray * catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id) { - DIR *data_dir = NULL; - struct dirent *data_ent = NULL; + FOBJ_FUNC_ARP(); + pioDirIter_i data_dir; + pio_dirent_t data_ent; + err_i err = $noerr(); parray *backups = NULL; int i; /* open backup instance backups directory */ - data_dir = fio_opendir(FIO_BACKUP_HOST, instanceState->instance_backup_subdir_path); - if (data_dir == NULL) + data_dir = $i(pioOpenDir, instanceState->backup_location, + instanceState->instance_backup_subdir_path, .err = &err); + if ($haserr(err) && getErrno(err) != ENOENT) { - elog(WARNING, "cannot open directory \"%s\": %s", instanceState->instance_backup_subdir_path, - strerror(errno)); - goto err_proc; + ft_logerr(FT_FATAL, $errmsg(err), "Failed to get backup list"); } /* scan the directory and list backups */ backups = parray_new(); - for (; (data_ent = fio_readdir(data_dir)) != NULL; errno = 0) + if ($isNULL(data_dir)) + { + elog(WARNING, "Cannot find any backups in \"%s\"", + instanceState->instance_backup_subdir_path); + return backups; + } + + while ((data_ent = $i(pioDirNext, data_dir, .err=&err)).stat.pst_kind) { char backup_conf_path[MAXPGPATH]; char data_path[MAXPGPATH]; pgBackup *backup = NULL; - /* skip not-directory entries and hidden entries */ - if (!IsDir(instanceState->instance_backup_subdir_path, data_ent->d_name, FIO_BACKUP_HOST) - || data_ent->d_name[0] == '.') + /* skip not-directory entries (hidden are skipped already) */ + if (data_ent.stat.pst_kind != PIO_KIND_DIRECTORY) continue; /* open subdirectory of specific backup */ - join_path_components(data_path, instanceState->instance_backup_subdir_path, data_ent->d_name); + join_path_components(data_path, instanceState->instance_backup_subdir_path, + data_ent.name.ptr); /* read backup information from BACKUP_CONTROL_FILE */ join_path_components(backup_conf_path, data_path, BACKUP_CONTROL_FILE); @@ 
-931,12 +918,12 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id { backup = pgut_new0(pgBackup); pgBackupInit(backup, instanceState->backup_location); - backup->start_time = base36dec(data_ent->d_name); + backup->start_time = base36dec(data_ent.name.ptr); /* XXX BACKUP_ID change it when backup_id wouldn't match start_time */ Assert(backup->backup_id == 0 || backup->backup_id == backup->start_time); backup->backup_id = backup->start_time; } - else if (strcmp(backup_id_of(backup), data_ent->d_name) != 0) + else if (strcmp(backup_id_of(backup), data_ent.name.ptr) != 0) { /* TODO there is no such guarantees */ elog(WARNING, "backup ID in control file \"%s\" doesn't match name of the backup folder \"%s\"", @@ -961,16 +948,14 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id parray_append(backups, backup); } - if (errno) + $i(pioClose, data_dir); // ignore error + + if ($haserr(err)) { - elog(WARNING, "Cannot read backup root directory \"%s\": %s", - instanceState->instance_backup_subdir_path, strerror(errno)); + ft_logerr(FT_WARNING, $errmsg(err), "Read backup root directory"); goto err_proc; } - fio_closedir(data_dir); - data_dir = NULL; - parray_qsort(backups, pgBackupCompareIdDesc); /* Link incremental backups with their ancestors.*/ @@ -993,8 +978,6 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id return backups; err_proc: - if (data_dir) - fio_closedir(data_dir); if (backups) parray_walk(backups, pgBackupFree); parray_free(backups); From 776b3b5f2348f5b350230f1d084dac533e561fd9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 17 Dec 2022 15:13:55 +0300 Subject: [PATCH 220/339] ... 
--- src/utils/file.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index dc027cd81..d006dfa96 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2743,7 +2743,6 @@ fio_communicate(int in, int out) ft_assert(hdr.handle >= 0 && hdr.handle < FIO_FDMAX); ft_assert(objs[hdr.handle] == NULL); pioDirIter_i iter; - err_i err; iter = $i(pioOpenDir, drive, buf, .err = &err); if ($haserr(err)) @@ -2795,7 +2794,6 @@ fio_communicate(int in, int out) } case PIO_CLOSE: { - err_i err; ft_assert(hdr.handle >= 0); ft_assert(objs[hdr.handle] != NULL); From f626bba162649220168f1a609944815527c2310a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 17 Dec 2022 15:32:44 +0300 Subject: [PATCH 221/339] [PBCKP-416] pioIsDirEmpty --- src/dir.c | 35 +++++----------------- src/utils/file.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++- src/utils/file.h | 5 +++- 3 files changed, 86 insertions(+), 30 deletions(-) diff --git a/src/dir.c b/src/dir.c index 82af5eaea..ec84644b1 100644 --- a/src/dir.c +++ b/src/dir.c @@ -1320,36 +1320,15 @@ control_string_bad_format(ft_bytes_t str) bool dir_is_empty(const char *path, fio_location location) { - DIR *dir; - struct dirent *dir_ent; - - dir = fio_opendir(location, path); - if (dir == NULL) - { - /* Directory in path doesn't exist */ - if (errno == ENOENT) - return true; - elog(ERROR, "cannot open directory \"%s\": %s", path, strerror(errno)); - } - - errno = 0; - while ((dir_ent = fio_readdir(dir))) - { - /* Skip entries point current dir or parent dir */ - if (strcmp(dir_ent->d_name, ".") == 0 || - strcmp(dir_ent->d_name, "..") == 0) - continue; - - /* Directory is not empty */ - fio_closedir(dir); - return false; - } - if (errno) - elog(ERROR, "cannot read directory \"%s\": %s", path, strerror(errno)); + pioDrive_i drive = pioDriveForLocation(location); + err_i err; + bool is_empty; - fio_closedir(dir); + is_empty = $i(pioIsDirEmpty, drive, path, &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, 
$errmsg(err), "Checking dir is empty"); - return true; + return is_empty; } /* diff --git a/src/utils/file.c b/src/utils/file.c index d006dfa96..d5801b3a4 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2792,9 +2792,24 @@ fio_communicate(int in, int out) ft_strbuf_free(&names); break; } - case PIO_CLOSE: + case PIO_IS_DIR_EMPTY: { + bool is_empty; + + is_empty = $i(pioIsDirEmpty, drive, buf, .err = &err); + if ($haserr(err)) + fio_send_pio_err(out, err); + else + { + hdr.size = 0; + hdr.arg = is_empty; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + } + break; + } + case PIO_CLOSE: + { ft_assert(hdr.handle >= 0); ft_assert(objs[hdr.handle] != NULL); @@ -3332,6 +3347,42 @@ pioLocalDrive_pioOpenDir(VSelf, path_t path, err_i* err) .dir = dir)); } +static bool +pioLocalDrive_pioIsDirEmpty(VSelf, path_t path, err_i* err) +{ + Self(pioLocalDrive); + DIR* dir; + struct dirent *dent; + bool is_empty = true; + fobj_reset_err(err); + + dir = opendir(path); + if (dir == NULL) + { + if (errno == ENOENT) + return true; + *err = $syserr(errno, "Cannot open dir {path:q}", path(path)); + return false; + } + + while ((dent = readdir(dir)) != NULL) + { + if (strcmp(dent->d_name, ".") == 0) + continue; + if (strcmp(dent->d_name, "..") == 0) + continue; + is_empty = false; + break; + } + + if (errno) + *err = $syserr(errno, "Couldn't read dir {path:q}", path(path)); + + closedir(dir); + + return is_empty; +} + static void pioLocalDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { FOBJ_FUNC_ARP(); @@ -4118,6 +4169,29 @@ pioRemoteDrive_pioOpenDir(VSelf, path_t path, err_i* err) .pos = 0)); } +static bool +pioRemoteDrive_pioIsDirEmpty(VSelf, path_t path, err_i* err) +{ + Self(pioRemoteDrive); + fio_header hdr = { + .cop = PIO_IS_DIR_EMPTY, + .size = strlen(path)+1, + }; + fobj_reset_err(err); + + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); + + 
IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + if (hdr.cop == FIO_PIO_ERROR) + { + *err = fio_receive_pio_err(&hdr); + return false; + } + ft_assert(hdr.cop == PIO_IS_DIR_EMPTY); + return hdr.arg; +} + static void pioRemoteDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { FOBJ_FUNC_ARP(); diff --git a/src/utils/file.h b/src/utils/file.h index dc3c82b16..07394b226 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -72,6 +72,7 @@ typedef enum PIO_GET_ASYNC_ERROR, PIO_DIR_OPEN, PIO_DIR_NEXT, + PIO_IS_DIR_EMPTY, PIO_CLOSE, PIO_DISPOSE, } fio_operations; @@ -284,6 +285,7 @@ fobj_iface(pioPagesIterator); #define mth__pioIsRemote bool #define mth__pioMakeDir err_i, (path_t, path), (mode_t, mode), (bool, strict) #define mth__pioOpenDir pioDirIter_i, (path_t, path), (err_i*, err) +#define mth__pioIsDirEmpty bool, (path_t, path), (err_i*, err) #define mth__pioListDir void, (parray *, files), (const char *, root), \ (bool, handle_tablespaces), (bool, symlink_and_hidden), \ (bool, backup_logs), (bool, skip_hidden), (int, external_dir_num) @@ -313,6 +315,7 @@ fobj_method(pioGetCRC32); fobj_method(pioMakeDir); fobj_method(pioFilesAreSame); fobj_method(pioOpenDir); +fobj_method(pioIsDirEmpty); fobj_method(pioListDir); fobj_method(pioRemoveDir); fobj_method(pioReadFile); @@ -322,7 +325,7 @@ fobj_method(pioIteratePages); #define iface__pioDrive mth(pioOpenRead, pioOpenReadStream), \ mth(pioStat, pioRemove), \ mth(pioExists, pioGetCRC32, pioIsRemote), \ - mth(pioMakeDir, pioOpenDir, pioListDir, pioRemoveDir), \ + mth(pioMakeDir, pioOpenDir, pioIsDirEmpty, pioListDir, pioRemoveDir), \ mth(pioFilesAreSame, pioReadFile, pioWriteFile), \ mth(pioOpenRewrite) fobj_iface(pioDrive); From 0d145dac21461130b3446344db79b0311be03779 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 04:56:29 +0300 Subject: [PATCH 222/339] [PBCKP-416] use pioOpenDir in catalog_get_timelines --- src/catalog.c | 77 
+++++++++++++++++++++++++++++++--------------- src/delete.c | 4 +-- src/pg_probackup.h | 21 +++++++------ src/utils/file.c | 4 --- src/utils/file.h | 14 +++++++++ 5 files changed, 79 insertions(+), 41 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 130ad9872..32b977779 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1506,49 +1506,72 @@ create_backup_dir(pgBackup *backup, const char *backup_instance_path) parray * catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) { + FOBJ_FUNC_ARP(); int i,j,k; - parray *xlog_files_list = parray_new(); parray *timelineinfos; parray *backups; timelineInfo *tlinfo; + ft_arr_dirent_t xlog_files_list = ft_arr_init(); + pioDirIter_i dir; + pio_dirent_t file; + err_i err; /* for fancy reporting */ char begin_segno_str[MAXFNAMELEN]; char end_segno_str[MAXFNAMELEN]; + + dir = $i(pioOpenDir, instanceState->backup_location, + .path = instanceState->instance_wal_subdir_path, + .err = &err); + /* read all xlog files that belong to this archive */ - backup_list_dir(xlog_files_list, instanceState->instance_wal_subdir_path); - parray_qsort(xlog_files_list, pgFileCompareName); + if (!$isNULL(dir)) + { + while ((file = $i(pioDirNext, dir, .err = &err)).stat.pst_kind) + { + if (file.stat.pst_kind != PIO_KIND_REGULAR) + continue; + file.name = ft_strdup(file.name); + ft_arr_dirent_push(&xlog_files_list, file); + } + + ft_qsort_dirent(xlog_files_list.ptr, xlog_files_list.len, compare_dirent_by_name); + + $i(pioClose, dir); + } + if ($haserr(err) && getErrno(err) != ENOENT) + ft_logerr(FT_FATAL, $errmsg(err), "Reading wal dir"); timelineinfos = parray_new(); tlinfo = NULL; /* walk through files and collect info about timelines */ - for (i = 0; i < parray_num(xlog_files_list); i++) + for (i = 0; i < xlog_files_list.len; ft_str_free(&file.name), i++) { - pgFile *file = (pgFile *) parray_get(xlog_files_list, i); TimeLineID tli; parray *timelines; xlogFile *wal_file = NULL; + file = xlog_files_list.ptr[i]; /* * 
Regular WAL file. * IsXLogFileName() cannot be used here */ - if (strspn(file->name, "0123456789ABCDEF") == XLOG_FNAME_LEN) + if (strspn(file.name.ptr, "0123456789ABCDEF") == XLOG_FNAME_LEN) { int result = 0; uint32 log, seg; XLogSegNo segno = 0; char suffix[MAXFNAMELEN]; - result = sscanf(file->name, "%08X%08X%08X.%s", + result = sscanf(file.name.ptr, "%08X%08X%08X.%s", &tli, &log, &seg, (char *) &suffix); /* sanity */ if (result < 3) { - elog(WARNING, "unexpected WAL file name \"%s\"", file->name); + elog(WARNING, "unexpected WAL file name \"%s\"", file.name.ptr); continue; } @@ -1559,9 +1582,9 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) if (result == 4) { /* backup history file. Currently we don't use them */ - if (IsBackupHistoryFileName(file->name)) + if (IsBackupHistoryFileName(file.name.ptr)) { - elog(VERBOSE, "backup history file \"%s\"", file->name); + elog(VERBOSE, "backup history file \"%s\"", file.name.ptr); if (!tlinfo || tlinfo->tli != tli) { @@ -1571,7 +1594,8 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) /* append file to xlog file list */ wal_file = palloc(sizeof(xlogFile)); - wal_file->file = *file; + wal_file->name = ft_str_steal(&file.name); + wal_file->size = file.stat.pst_size; wal_file->segno = segno; wal_file->type = BACKUP_HISTORY_FILE; wal_file->keep = false; @@ -1579,10 +1603,10 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) continue; } /* partial WAL segment */ - else if (IsPartialXLogFileName(file->name) || - IsPartialCompressXLogFileName(file->name)) + else if (IsPartialXLogFileName(file.name.ptr) || + IsPartialCompressXLogFileName(file.name)) { - elog(VERBOSE, "partial WAL file \"%s\"", file->name); + elog(VERBOSE, "partial WAL file \"%s\"", file.name.ptr); if (!tlinfo || tlinfo->tli != tli) { @@ -1592,7 +1616,8 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) /* append file to xlog file list */ 
wal_file = palloc(sizeof(xlogFile)); - wal_file->file = *file; + wal_file->name = ft_str_steal(&file.name); + wal_file->size = file.stat.pst_size; wal_file->segno = segno; wal_file->type = PARTIAL_SEGMENT; wal_file->keep = false; @@ -1600,10 +1625,10 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) continue; } /* temp WAL segment */ - else if (IsTempXLogFileName(file->name) || - IsTempCompressXLogFileName(file->name)) + else if (IsTempXLogFileName(file.name) || + IsTempCompressXLogFileName(file.name)) { - elog(VERBOSE, "temp WAL file \"%s\"", file->name); + elog(VERBOSE, "temp WAL file \"%s\"", file.name.ptr); if (!tlinfo || tlinfo->tli != tli) { @@ -1613,7 +1638,8 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) /* append file to xlog file list */ wal_file = palloc(sizeof(xlogFile)); - wal_file->file = *file; + wal_file->name = ft_str_steal(&file.name); + wal_file->size = file.stat.pst_size; wal_file->segno = segno; wal_file->type = TEMP_SEGMENT; wal_file->keep = false; @@ -1623,7 +1649,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) /* we only expect compressed wal files with .gz suffix */ else if (strcmp(suffix, "gz") != 0) { - elog(WARNING, "unexpected WAL file name \"%s\"", file->name); + elog(WARNING, "unexpected WAL file name \"%s\"", file.name.ptr); continue; } } @@ -1671,22 +1697,23 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) tlinfo->end_segno = segno; /* update counters */ tlinfo->n_xlog_files++; - tlinfo->size += file->size; + tlinfo->size += file.stat.pst_size; /* append file to xlog file list */ wal_file = palloc(sizeof(xlogFile)); - wal_file->file = *file; + wal_file->name = ft_str_steal(&file.name); + wal_file->size = file.stat.pst_size; wal_file->segno = segno; wal_file->type = SEGMENT; wal_file->keep = false; parray_append(tlinfo->xlog_filelist, wal_file); } /* timeline history file */ - else if 
(IsTLHistoryFileName(file->name)) + else if (IsTLHistoryFileName(file.name.ptr)) { TimeLineHistoryEntry *tln; - sscanf(file->name, "%08X.history", &tli); + sscanf(file.name.ptr, "%08X.history", &tli); timelines = read_timeline_history(instanceState->instance_wal_subdir_path, tli, true); /* History file is empty or corrupted, disregard it */ @@ -1721,7 +1748,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) parray_free(timelines); } else - elog(WARNING, "unexpected WAL file name \"%s\"", file->name); + elog(WARNING, "unexpected WAL file name \"%s\"", file.name.ptr); } /* save information about backups belonging to each timeline */ diff --git a/src/delete.c b/src/delete.c index cc2ae1147..1b817ae32 100644 --- a/src/delete.c +++ b/src/delete.c @@ -877,7 +877,7 @@ delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timeli xlogFile *wal_file = (xlogFile *) parray_get(tlinfo->xlog_filelist, i); if (purge_all || wal_file->segno < OldestToKeepSegNo) - wal_size_actual += wal_file->file.size; + wal_size_actual += wal_file->size; } /* Report the actual size to delete */ @@ -905,7 +905,7 @@ delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timeli { char wal_fullpath[MAXPGPATH]; - join_path_components(wal_fullpath, instanceState->instance_wal_subdir_path, wal_file->file.name); + join_path_components(wal_fullpath, instanceState->instance_wal_subdir_path, wal_file->name.ptr); /* save segment from purging */ if (wal_file->keep) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index ddb5d4c74..95a7b7ea8 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -613,7 +613,8 @@ typedef enum xlogFileType typedef struct xlogFile { - pgFile file; + ft_str_t name; + int64_t size; XLogSegNo segno; xlogFileType type; bool keep; /* Used to prevent removal of WAL segments @@ -686,19 +687,19 @@ typedef struct StopBackupCallbackParams XLogFromFileName(fname, tli, logSegNo, wal_segsz_bytes) #define 
IsPartialCompressXLogFileName(fname) \ - (strlen(fname) == XLOG_FNAME_LEN + strlen(".gz.partial") && \ - strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ - strcmp((fname) + XLOG_FNAME_LEN, ".gz.partial") == 0) + ((fname).len == XLOG_FNAME_LEN + strlen(".gz.partial") && \ + ft_str_spnc((fname), "0123456789ABCDEF") == XLOG_FNAME_LEN && \ + ft_str_ends_withc((fname), ".gz.partial")) #define IsTempXLogFileName(fname) \ - (strlen(fname) == XLOG_FNAME_LEN + strlen("~tmp") + 6 && \ - strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ - strncmp((fname) + XLOG_FNAME_LEN, "~tmp", 4) == 0) + ((fname).len == XLOG_FNAME_LEN + strlen("~tmp") + 6 && \ + ft_str_spnc((fname), "0123456789ABCDEF") == XLOG_FNAME_LEN && \ + ft_str_find_cstr((fname), "~tmp") == XLOG_FNAME_LEN) #define IsTempCompressXLogFileName(fname) \ - (strlen(fname) == XLOG_FNAME_LEN + strlen(".gz~tmp") + 6 && \ - strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ - strncmp((fname) + XLOG_FNAME_LEN, ".gz~tmp", 7) == 0) + ((fname).len == XLOG_FNAME_LEN + strlen(".gz~tmp") + 6 && \ + ft_str_spnc((fname), "0123456789ABCDEF") == XLOG_FNAME_LEN && \ + ft_str_find_cstr((fname), ".gz~tmp") == XLOG_FNAME_LEN) #define IsSshProtocol() (instance_config.remote.host && strcmp(instance_config.remote.proto, "ssh") == 0) diff --git a/src/utils/file.c b/src/utils/file.c index d5801b3a4..1145ed6a4 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2928,10 +2928,6 @@ typedef struct pioRemoteWriteFile { iface(pioWriteCloser, pioDBWriter) fobj_klass(pioRemoteWriteFile); -#define FT_SLICE dirent -#define FT_SLICE_TYPE pio_dirent_t -#include - typedef struct pioRemoteDir { ft_str_t path; diff --git a/src/utils/file.h b/src/utils/file.h index 07394b226..d3eac3f3c 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -122,6 +122,20 @@ typedef struct pio_dirent { ft_str_t name; } pio_dirent_t; +#define FT_SLICE dirent +#define FT_SLICE_TYPE pio_dirent_t +#include + +#define FT_SORT dirent +#define FT_SORT_TYPE 
pio_dirent_t +#include + +ft_inline int +compare_dirent_by_name(pio_dirent_t d1, pio_dirent_t d2) +{ + return ft_strcmp(d1.name, d2.name); +} + extern fio_location MyLocation; extern void setMyLocation(ProbackupSubcmd const subcmd); From f30c94a192eb975aa09ce07e3d7d76f1df56c932 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 04:58:20 +0300 Subject: [PATCH 223/339] [PBCKP-416] remove backup_list_dir --- src/utils/file.c | 7 ------- src/utils/file.h | 1 - 2 files changed, 8 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 1145ed6a4..001f0aeea 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1806,13 +1806,6 @@ void db_list_dir(parray *files, const char *root, bool handle_tablespaces, .external_dir_num = external_dir_num); } -void backup_list_dir(parray *files, const char *root) { - pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); - $i(pioListDir, drive, .files = files, .root = root, .handle_tablespaces = false, - .symlink_and_hidden = false, .backup_logs = false, .skip_hidden = false, - .external_dir_num = 0); -} - /* * WARNING! this function is not paired with fio_remove_dir * because there is no such function. 
Instead, it is paired diff --git a/src/utils/file.h b/src/utils/file.h index d3eac3f3c..aa487397d 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -180,7 +180,6 @@ extern pid_t fio_check_postmaster(fio_location location, const char *pgdata); extern void db_list_dir(parray *files, const char *root, bool handle_tablespaces, bool backup_logs, int external_dir_num); -extern void backup_list_dir(parray *files, const char *root); extern PageState *fio_get_checksum_map(fio_location location, const char *fullpath, uint32 checksum_version, int n_blocks, XLogRecPtr dest_stop_lsn, BlockNumber segmentno); From 65fe3d032db33abceddb4ba7fd28452c3eccdee3 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 05:20:27 +0300 Subject: [PATCH 224/339] [PBCKP-416] use pioOpenDir in archive.c setup_push_filelist --- src/archive.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/src/archive.c b/src/archive.c index d84271e73..16f292194 100644 --- a/src/archive.c +++ b/src/archive.c @@ -92,7 +92,7 @@ static int push_file(WALSegno *xlogfile, const char *archive_status_dir, bool no_ready_rename, bool is_compress, int compress_level); -static parray *setup_push_filelist(const char *archive_status_dir, +static parray *setup_push_filelist(pioDrive_i drive, const char *archive_status_dir, const char *first_file, int batch_size); /* @@ -141,7 +141,8 @@ do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *pg #endif /* Setup filelist and locks */ - batch_files = setup_push_filelist(archive_status_dir, wal_file_name, batch_size); + batch_files = setup_push_filelist(instanceState->database_location, + archive_status_dir, wal_file_name, batch_size); n_threads = num_threads; if (num_threads > parray_num(batch_files)) @@ -517,13 +518,15 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, * and pack such files into batch sized array. 
*/ parray * -setup_push_filelist(const char *archive_status_dir, const char *first_file, - int batch_size) +setup_push_filelist(pioDrive_i drive, const char *archive_status_dir, + const char *first_file, int batch_size) { - int i; + FOBJ_FUNC_ARP(); WALSegno *xlogfile = NULL; - parray *status_files = NULL; parray *batch_files = parray_new(); + pioDirIter_i iter; + pio_dirent_t entry; + err_i err; /* guarantee that first filename is in batch list */ xlogfile = palloc(sizeof(WALSegno)); @@ -535,18 +538,17 @@ setup_push_filelist(const char *archive_status_dir, const char *first_file, return batch_files; /* get list of files from archive_status */ - status_files = parray_new(); - db_list_dir(status_files, archive_status_dir, false, false, 0); - parray_qsort(status_files, pgFileCompareName); + iter = $i(pioOpenDir, drive, archive_status_dir, .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Reading push filelist"); - for (i = 0; i < parray_num(status_files); i++) + while ((entry=$i(pioDirNext, iter, .err=&err)).stat.pst_kind) { int result = 0; char filename[MAXFNAMELEN]; char suffix[MAXFNAMELEN]; - pgFile *file = (pgFile *) parray_get(status_files, i); - result = sscanf(file->name, "%[^.]%s", (char *) &filename, (char *) &suffix); + result = sscanf(entry.name.ptr, "%[^.]%s", (char *) &filename, (char *) &suffix); if (result != 2) continue; @@ -568,9 +570,9 @@ setup_push_filelist(const char *archive_status_dir, const char *first_file, break; } - /* cleanup */ - parray_walk(status_files, pgFileFree); - parray_free(status_files); + $i(pioClose, iter); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Reading push filelist"); return batch_files; } From dfb5b0dc0d95fc5fac693902c4f88b3c1a863311 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 15:23:35 +0300 Subject: [PATCH 225/339] [PBCKP-416] pioLocalDrive_pioRemoveDir to not use pioListDir yep, this way code is larger. But it self contained. 
--- src/pg_probackup.h | 8 ++++ src/utils/file.c | 117 +++++++++++++++++++++++++++++++++++---------- 2 files changed, 100 insertions(+), 25 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 95a7b7ea8..04cbb369a 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1215,4 +1215,12 @@ extern XLogRecPtr wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr lsn, bool extern void wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBackup *backup); extern int64 calculate_datasize_of_filelist(parray *filelist); + +/* + * Slices and arrays for C strings + */ +#define FT_SLICE cstr +#define FT_SLICE_TYPE char* +#include + #endif /* PG_PROBACKUP_H */ diff --git a/src/utils/file.c b/src/utils/file.c index 001f0aeea..bc111af4e 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3377,39 +3377,106 @@ pioLocalDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { FOBJ_FUNC_ARP(); Self(pioLocalDrive); char full_path[MAXPGPATH]; - /* list files to be deleted */ - parray* files = parray_new(); - $(pioListDir, self, .files = files, .root = root, .handle_tablespaces = false, - .symlink_and_hidden = false, .backup_logs = false, .skip_hidden = false, .external_dir_num = 0); + ft_arr_cstr_t dirs = ft_arr_init(); + ft_arr_cstr_t files = ft_arr_init(); + char *dirname; + char *filename; + DIR* dir; + struct dirent *dirent; + struct stat st; + size_t i; + /* note: we don't dup root, so will not free it */ + ft_arr_cstr_push(&dirs, (char*)root); - // adding the root directory because it must be deleted too - if(root_as_well) - parray_append(files, pgFileNew(root, "", false, 0, $bind(pioDrive, self))); + for (i = 0; i < dirs.len; i++) /* note that dirs.len will grow */ + { + dirname = dirs.ptr[i]; + dir = opendir(dirname); - /* delete leaf node first */ - parray_qsort(files, pgFileCompareRelPathWithExternalDesc); - size_t num_files = parray_num(files); - for (int i = 0; i < num_files; i++) - { - pgFile *file = (pgFile *) 
parray_get(files, i); + if (dir == NULL) + { + if (errno == ENOENT) + { + elog(WARNING, "Dir \"%s\" disappeared", dirname); + dirs.ptr[i] = NULL; + if (i != 0) + ft_free(dirname); + continue; + } + else + elog(ERROR, "Cannot open dir \"%s\": %m", dirname); + } - join_path_components(full_path, root, file->rel_path); + for(errno=0; (dirent = readdir(dir)) != NULL; errno=0) + { + if (strcmp(dirent->d_name, ".") == 0 || + strcmp(dirent->d_name, "..") == 0) + continue; - if (interrupted) - elog(ERROR, "interrupted during the directory deletion: %s", full_path); + join_path_components(full_path, dirname, dirent->d_name); + if (stat(full_path, &st)) + { + if (errno == ENOENT) + { + elog(WARNING, "File \"%s\" disappeared", full_path); + continue; + } + elog(ERROR, "Could not stat \"%s\": %m", full_path); + } - if (progress) - elog(INFO, "Progress: (%d/%zd). Delete file \"%s\"", - i + 1, num_files, full_path); + if (S_ISDIR(st.st_mode)) + ft_arr_cstr_push(&dirs, ft_cstrdup(full_path)); + else + ft_arr_cstr_push(&files, ft_cstrdup(dirent->d_name)); + } + if (errno) + elog(ERROR, "Could not readdir \"%s\": %m", full_path); + closedir(dir); - err_i err = $(pioRemove, self, full_path, false); - if($haserr(err)) - elog(ERROR, "Cannot remove file or directory \"%s\": %s", full_path, $errmsg(err)); - } + while (files.len > 0) + { + filename = ft_arr_cstr_pop(&files); + join_path_components(full_path, dirname, filename); + ft_free(filename); + + if (progress) + elog(INFO, "Progress: delete file \"%s\"", full_path); + if (remove_file_or_dir(full_path) != 0) + { + if (errno == ENOENT) + elog(WARNING, "File \"%s\" disappeared", full_path); + else + elog(ERROR, "Could not remove \"%s\": %m", full_path); + } + } + } + + while (dirs.len > 0) + { + dirname = ft_arr_cstr_pop(&dirs); + if (dirname == NULL) + continue; + + if (dirs.len == 0 && !root_as_well) + break; + + if (progress) + elog(INFO, "Progress: delete dir \"%s\"", full_path); + if (remove_file_or_dir(dirname) != 0) + { + if 
(errno == ENOENT) + elog(WARNING, "Dir \"%s\" disappeared", full_path); + else + elog(ERROR, "Could not remove \"%s\": %m", full_path); + } + + if (dirs.len != 0) /* we didn't dup root, so don't free it */ + ft_free(dirname); + } - parray_walk(files, pgFileFree); - parray_free(files); + ft_arr_cstr_free(&dirs); + ft_arr_cstr_free(&files); } static ft_bytes_t From 2c756c3397a295f46cce742839ff2a9d1f4f5a87 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 15:04:46 +0300 Subject: [PATCH 226/339] Revert "[PBCKP-234] proper pioListDir usage" partially This reverts commit 4ecb11f45db906375358be9c9ad0106434169374. It seems it would be easier to change to pioOpenDir with rollback. But we keep pioRemoveDir usage. --- src/backup.c | 8 +- src/catchup.c | 6 +- src/checkdb.c | 3 +- src/dir.c | 268 ++++++++++++++++++++++--------- src/pg_probackup.h | 5 +- src/restore.c | 5 +- src/utils/file.c | 381 --------------------------------------------- src/utils/file.h | 9 +- 8 files changed, 211 insertions(+), 474 deletions(-) diff --git a/src/backup.c b/src/backup.c index 0a56f765c..c1f5d8065 100644 --- a/src/backup.c +++ b/src/backup.c @@ -275,8 +275,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, join_path_components(external_prefix, current.root_dir, EXTERNAL_DIR); /* list files with the logical path. omit $PGDATA */ - db_list_dir(backup_files_list, instance_config.pgdata, true, backup_logs, 0); - exclude_files(backup_files_list, backup_logs); + db_list_dir(backup_files_list, instance_config.pgdata, true, backup_logs, 0, FIO_DB_HOST); /* * Get database_map (name to oid) for use in partial restore feature. @@ -292,9 +291,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, { for (i = 0; i < parray_num(external_dirs); i++) { - /* External dirs numeration starts with 1. 
- * 0 value is not external dir */ - db_list_dir(backup_files_list, parray_get(external_dirs, i), false, false, i+1); + db_list_dir(backup_files_list, parray_get(external_dirs, i), + false, false, i + 1, FIO_DB_HOST); } } diff --git a/src/catchup.c b/src/catchup.c index 4af3caed1..087b74fc6 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -648,9 +648,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (current.backup_mode != BACKUP_MODE_FULL) { dest_filelist = parray_new(); - db_list_dir(dest_filelist, dest_pgdata, true, false, 0); + db_list_dir(dest_filelist, dest_pgdata, true, false, 0, FIO_LOCAL_HOST); filter_filelist(dest_filelist, dest_pgdata, exclude_absolute_paths_list, exclude_relative_paths_list, "Destination"); - exclude_files(dest_filelist, false); // fill dest_redo.lsn and dest_redo.tli get_redo(FIO_LOCAL_HOST, dest_pgdata, &dest_redo); @@ -720,8 +719,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, source_filelist = parray_new(); /* list files with the logical path. omit $PGDATA */ - db_list_dir(source_filelist, source_pgdata, true, false, 0); - exclude_files(source_filelist, false); + db_list_dir(source_filelist, source_pgdata, true, false, 0, FIO_DB_HOST); //REVIEW FIXME. Let's fix that before release. // TODO what if wal is not a dir (symlink to a dir)? diff --git a/src/checkdb.c b/src/checkdb.c index 055e5beaf..e2b2e88cf 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -206,8 +206,7 @@ do_block_validation(char *pgdata, uint32 checksum_version) files_list = parray_new(); /* list files with the logical path. omit $PGDATA */ - db_list_dir(files_list, pgdata, true, false, 0); - exclude_files(files_list, false); + db_list_dir(files_list, pgdata, true, false, 0, FIO_DB_HOST); /* * Sort pathname ascending. 
diff --git a/src/dir.c b/src/dir.c index ec84644b1..90359e398 100644 --- a/src/dir.c +++ b/src/dir.c @@ -17,6 +17,10 @@ #include #include "utils/configuration.h" +#include "catalog/pg_tablespace.h" +#if PG_VERSION_NUM < 110000 +#include "catalog/catalog.h" +#endif /* * The contents of these directories are removed or recreated during server @@ -112,21 +116,17 @@ typedef struct TablespaceCreatedList TablespaceCreatedListCell *tail; } TablespaceCreatedList; -typedef struct exclude_cb_ctx { - bool backup_logs; - size_t pref_len; - char exclude_dir_content_pref[MAXPGPATH]; -} exclude_cb_ctx; - static char dir_check_file(pgFile *file, bool backup_logs); +static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, + bool exclude, bool backup_logs, + int external_dir_num, fio_location location); static void opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, const char *type); static void cleanup_tablespace(const char *path); static void control_string_bad_format(ft_bytes_t str); -static bool exclude_files_cb(void *value, void *exclude_args); static void print_database_map(ft_strbuf_t *buf, parray *database_list); @@ -365,6 +365,44 @@ db_map_entry_free(void *entry) free(entry); } +/* + * List files and directories in the directory "root" and add + * pgFile objects to "files". 
+ */ +void +db_list_dir(parray *files, const char *root, bool exclude, + bool backup_logs, int external_dir_num, + fio_location location) +{ + pgFile *file; + pioDrive_i drive = pioDriveForLocation(location); + + file = pgFileNew(root, "", true, external_dir_num, drive); + if (file == NULL) + { + /* For external directory this is not ok */ + if (external_dir_num > 0) + elog(ERROR, "External directory is not found: \"%s\"", root); + else + return; + } + + if (file->kind != PIO_KIND_DIRECTORY) + { + if (external_dir_num > 0) + elog(ERROR, " --external-dirs option \"%s\": directory or symbolic link expected", + root); + else + elog(WARNING, "Skip \"%s\": unexpected file format", root); + return; + } + + dir_list_file_internal(files, file, root, exclude, + backup_logs, external_dir_num, location); + + pgFileFree(file); +} + #define CHECK_FALSE 0 #define CHECK_TRUE 1 #define CHECK_EXCLUDE_FALSE 2 @@ -386,6 +424,7 @@ static char dir_check_file(pgFile *file, bool backup_logs) { int i; + int sscanf_res; bool in_tablespace = false; in_tablespace = path_is_prefix_of_path(PG_TBLSPC_DIR, file->rel_path); @@ -433,6 +472,54 @@ dir_check_file(pgFile *file, bool backup_logs) } } + /* + * Do not copy tablespaces twice. It may happen if the tablespace is located + * inside the PGDATA. 
+ */ + if (file->kind == PIO_KIND_DIRECTORY && + strcmp(file->name, TABLESPACE_VERSION_DIRECTORY) == 0) + { + Oid tblspcOid; + char tmp_rel_path[MAXPGPATH]; + + /* + * Valid path for the tablespace is + * pg_tblspc/tblsOid/TABLESPACE_VERSION_DIRECTORY + */ + if (!path_is_prefix_of_path(PG_TBLSPC_DIR, file->rel_path)) + return CHECK_FALSE; + sscanf_res = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%s", + &tblspcOid, tmp_rel_path); + if (sscanf_res == 0) + return CHECK_FALSE; + } + + if (in_tablespace) + { + char tmp_rel_path[MAXPGPATH]; + + sscanf_res = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%[^/]/%u/", + &(file->tblspcOid), tmp_rel_path, + &(file->dbOid)); + + /* + * We should skip other files and directories rather than + * TABLESPACE_VERSION_DIRECTORY, if this is recursive tablespace. + */ + if (sscanf_res == 2 && strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) != 0) + return CHECK_FALSE; + } + else if (path_is_prefix_of_path("global", file->rel_path)) + { + file->tblspcOid = GLOBALTABLESPACE_OID; + } + else if (path_is_prefix_of_path("base", file->rel_path)) + { + file->tblspcOid = DEFAULTTABLESPACE_OID; + + sscanf(file->rel_path, "base/%u/", &(file->dbOid)); + } + /* Do not backup ptrack_init files */ if (file->kind == PIO_KIND_REGULAR && strcmp(file->name, "ptrack_init") == 0) return CHECK_FALSE; @@ -469,79 +556,118 @@ dir_check_file(pgFile *file, bool backup_logs) } /* - * Excluding default files from the files list. - * Input: - * parray *files - an array of pgFile* to filter. - * croterion_fn - a callback that filters things out - * Output: - * true - if the file must be deleted from the list - * false - otherwise + * List files in parent->path directory. If "exclude" is true do not add into + * "files" files from pgdata_exclude_files and directories from + * pgdata_exclude_dir. + * + * TODO: should we check for interrupt here ? 
*/ +static void +dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, + bool exclude, bool backup_logs, + int external_dir_num, fio_location location) +{ + DIR *dir; + struct dirent *dent; + pioDrive_i drive; -static bool -exclude_files_cb(void *value, void *exclude_args) { - pgFile *file = (pgFile*) value; - exclude_cb_ctx *ex_ctx = (exclude_cb_ctx*) exclude_args; + if (parent->kind != PIO_KIND_DIRECTORY) + elog(ERROR, "\"%s\" is not a directory", parent_dir); - /* - * Check the file relative path for previously excluded dir prefix. These files - * should not be in the list, only their empty parent directory, see dir_check_file. - * - * Assuming that the excluded dir is ALWAYS followed by its content like this: - * pref/dir/ - * pref/dir/file1 - * pref/dir/file2 - * pref/dir/file3 - * ... - * we can make prefix checks only for files that subsequently follow the excluded dir - * and avoid unnecessary checks for the rest of the files. So we store the prefix length, - * update it and the prefix itself once we've got a CHECK_EXCLUDE_FALSE status code, - * keep doing prefix checks while there are files in that directory and set prefix length - * to 0 once they are gone. 
- */ - if(ex_ctx->pref_len > 0 - && strncmp(ex_ctx->exclude_dir_content_pref, file->rel_path, ex_ctx->pref_len) == 0) { - return true; - } else { - memset(ex_ctx->exclude_dir_content_pref, 0, ex_ctx->pref_len); - ex_ctx->pref_len = 0; + drive = pioDriveForLocation(location); + + /* Open directory and list contents */ + dir = fio_opendir(location, parent_dir); + if (dir == NULL) + { + if (errno == ENOENT) + { + /* Maybe the directory was removed */ + return; + } + elog(ERROR, "Cannot open directory \"%s\": %s", + parent_dir, strerror(errno)); } - int check_res = dir_check_file(file, ex_ctx->backup_logs); + errno = 0; + while ((dent = fio_readdir(dir))) + { + pgFile *file; + char child[MAXPGPATH]; + char rel_child[MAXPGPATH]; + char check_res; - switch(check_res) { - case CHECK_FALSE: - return true; - break; - case CHECK_TRUE:; - return false; - break; - case CHECK_EXCLUDE_FALSE: - // since the excluded dir always goes before its contents, memorize it - // and use it for further files filtering. - strcpy(ex_ctx->exclude_dir_content_pref, file->rel_path); - ex_ctx->pref_len = strlen(file->rel_path); - return false; - break; - default: - // Should not get there normally. - assert(false); - return false; - break; - } + join_path_components(child, parent_dir, dent->d_name); + join_path_components(rel_child, parent->rel_path, dent->d_name); - // Should not get there as well. 
- return false; -} + file = pgFileNew(child, rel_child, true, external_dir_num, + drive); + if (file == NULL) + continue; + + /* Skip entries point current dir or parent dir */ + if (file->kind == PIO_KIND_DIRECTORY && + (strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0)) + { + pgFileFree(file); + continue; + } -void exclude_files(parray *files, bool backup_logs) { - exclude_cb_ctx ctx = { - .pref_len = 0, - .backup_logs = backup_logs, - .exclude_dir_content_pref = "\0", - }; + /* skip hidden files and directories */ + if (file->name[0] == '.') + { + elog(WARNING, "Skip hidden file: '%s'", child); + pgFileFree(file); + continue; + } - parray_remove_if(files, exclude_files_cb, (void*)&ctx, pgFileFree); + /* + * Add only files, directories and links. Skip sockets and other + * unexpected file formats. + */ + if (file->kind != PIO_KIND_DIRECTORY && file->kind != PIO_KIND_REGULAR) + { + elog(WARNING, "Skip '%s': unexpected file format", child); + pgFileFree(file); + continue; + } + + if (exclude) + { + check_res = dir_check_file(file, backup_logs); + if (check_res == CHECK_FALSE) + { + /* Skip */ + pgFileFree(file); + continue; + } + else if (check_res == CHECK_EXCLUDE_FALSE) + { + /* We add the directory itself which content was excluded */ + parray_append(files, file); + continue; + } + } + + parray_append(files, file); + + /* + * If the entry is a directory call dir_list_file_internal() + * recursively. 
+ */ + if (file->kind == PIO_KIND_DIRECTORY) + dir_list_file_internal(files, file, child, exclude, + backup_logs, external_dir_num, location); + } + + if (errno && errno != ENOENT) + { + int errno_tmp = errno; + fio_closedir(dir); + elog(ERROR, "Cannot read directory \"%s\": %s", + parent_dir, strerror(errno_tmp)); + } + fio_closedir(dir); } /* diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 04cbb369a..6432b794d 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -966,6 +966,10 @@ extern ft_str_t pb_control_line_get_str(pb_control_line *pb_line, const char *na extern bool pb_control_line_try_int64(pb_control_line *pb_line, const char *name, int64 *value); extern bool pb_control_line_try_str(pb_control_line *pb_line, const char *name, ft_str_t *value); +extern void db_list_dir(parray *files, const char *root, bool exclude, + bool backup_logs, + int external_dir_num, fio_location location); + extern const char *get_tablespace_mapping(const char *dir); extern void create_data_directories(parray *dest_files, const char *data_dir, @@ -1019,7 +1023,6 @@ extern int pgPrefixCompareString(const void *str1, const void *str2); extern int pgCompareOid(const void *f1, const void *f2); extern void pfilearray_clear_locks(parray *file_list); extern bool set_forkname(pgFile *file); -extern void exclude_files(parray *files, bool backup_logs); /* in data.c */ extern bool check_data_file(pgFile *file, const char *from_fullpath, uint32 checksum_version); diff --git a/src/restore.c b/src/restore.c index eb5c4a733..81203fbb6 100644 --- a/src/restore.c +++ b/src/restore.c @@ -871,7 +871,7 @@ restore_chain(InstanceState *instanceState, elog(INFO, "Extracting the content of destination directory for incremental restore"); time(&start_time); - db_list_dir(pgdata_files, pgdata_path, false, false, 0); + db_list_dir(pgdata_files, pgdata_path, false, false, 0, FIO_DB_HOST); /* * TODO: @@ -891,7 +891,8 @@ restore_chain(InstanceState *instanceState, char *external_path = 
parray_get(external_dirs, i); parray *external_files = parray_new(); - db_list_dir(external_files, external_path, false, false, i+1); + db_list_dir(external_files, external_path, + false, false, i + 1, FIO_DB_HOST); parray_concat(pgdata_files, external_files); parray_free(external_files); diff --git a/src/utils/file.c b/src/utils/file.c index bc111af4e..6d8432d55 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5,10 +5,6 @@ #include #include "file.h" -#include "catalog/pg_tablespace.h" -#if PG_VERSION_NUM < 110000 -#include "catalog/catalog.h" -#endif #include "storage/checksum.h" #define PRINTF_BUF_SIZE 1024 @@ -39,16 +35,6 @@ typedef struct int path_len; } fio_send_request; -typedef struct -{ - char path[MAXPGPATH]; - bool handle_tablespaces; - bool follow_symlink; - bool backup_logs; - bool skip_hidden; - int external_dir_num; -} fio_list_dir_request; - typedef struct { char path[MAXPGPATH]; bool root_as_well; @@ -115,13 +101,6 @@ struct __attribute__((packed)) fio_req_open_write { #undef fopen #endif -static void dir_list_file(parray *files, const char *root, bool handle_tablespaces, - bool follow_symlink, bool backup_logs, bool skip_hidden, - int external_dir_num, pioDBDrive_i drive); -static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, - bool handle_tablespaces, bool follow_symlink, bool backup_logs, - bool skip_hidden, int external_dir_num, pioDBDrive_i drive); - void setMyLocation(ProbackupSubcmd const subcmd) { @@ -1798,14 +1777,6 @@ pgFileGetCRC32(const char *file_path, bool missing_ok) } #endif /* PG_VERSION_NUM < 120000 */ -void db_list_dir(parray *files, const char *root, bool handle_tablespaces, - bool backup_logs, int external_dir_num) { - pioDrive_i drive = pioDriveForLocation(FIO_DB_HOST); - $i(pioListDir, drive, .files = files, .root = root, .handle_tablespaces = handle_tablespaces, - .symlink_and_hidden = true, .backup_logs = backup_logs, .skip_hidden = true, - .external_dir_num = external_dir_num); 
-} - /* * WARNING! this function is not paired with fio_remove_dir * because there is no such function. Instead, it is paired @@ -1826,272 +1797,6 @@ fio_remove_dir_impl(int out, char* buf) { IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } -/* - * List files, symbolic links and directories in the directory "root" and add - * pgFile objects to "files". We add "root" to "files" if add_root is true. - * - * When follow_symlink is true, symbolic link is ignored and only file or - * directory linked to will be listed. - * - * TODO: make it strictly local - */ -static void -dir_list_file(parray *files, const char *root, bool handle_tablespaces, bool follow_symlink, - bool backup_logs, bool skip_hidden, int external_dir_num, pioDBDrive_i drive) -{ - pgFile *file; - - Assert(!$i(pioIsRemote, drive)); - - file = pgFileNew(root, "", follow_symlink, external_dir_num, - $reduce(pioDrive, drive)); - if (file == NULL) - { - /* For external directory this is not ok */ - if (external_dir_num > 0) - elog(ERROR, "External directory is not found: \"%s\"", root); - else - return; - } - - if (file->kind != PIO_KIND_DIRECTORY) - { - if (external_dir_num > 0) - elog(ERROR, " --external-dirs option \"%s\": directory or symbolic link expected", - root); - else - elog(WARNING, "Skip \"%s\": unexpected file format", root); - return; - } - - dir_list_file_internal(files, file, root, handle_tablespaces, follow_symlink, - backup_logs, skip_hidden, external_dir_num, drive); - - pgFileFree(file); -} - -/* - * List files in parent->path directory. - * If "handle_tablespaces" is true, handle recursive tablespaces - * and the ones located inside pgdata. - * If "follow_symlink" is true, follow symlinks so that the - * fio_stat call fetches the info from the file pointed to by the - * symlink, not from the symlink itself. - * - * TODO: should we check for interrupt here ? 
- */ -static void -dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, - bool handle_tablespaces, bool follow_symlink, bool backup_logs, - bool skip_hidden, int external_dir_num, pioDBDrive_i drive) -{ - DIR *dir; - struct dirent *dent; - bool in_tablespace = false; - - Assert(!$i(pioIsRemote, drive)); - - if (parent->kind != PIO_KIND_DIRECTORY) - elog(ERROR, "\"%s\" is not a directory", parent_dir); - - in_tablespace = path_is_prefix_of_path(PG_TBLSPC_DIR, parent->rel_path); - - /* Open directory and list contents */ - dir = opendir(parent_dir); - if (dir == NULL) - { - if (errno == ENOENT) - { - /* Maybe the directory was removed */ - return; - } - elog(ERROR, "Cannot open directory \"%s\": %s", - parent_dir, strerror(errno)); - } - - errno = 0; - while ((dent = readdir(dir))) - { - pgFile *file; - char child[MAXPGPATH]; - char rel_child[MAXPGPATH]; - - join_path_components(child, parent_dir, dent->d_name); - join_path_components(rel_child, parent->rel_path, dent->d_name); - - file = pgFileNew(child, rel_child, follow_symlink, - external_dir_num, $reduce(pioDrive, drive)); - if (file == NULL) - continue; - - /* Skip entries point current dir or parent dir */ - if (file->kind == PIO_KIND_DIRECTORY && - (strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0)) - { - pgFileFree(file); - continue; - } - - /* skip hidden files and directories */ - if (skip_hidden && file->name[0] == '.') { - elog(WARNING, "Skip hidden file: '%s'", child); - pgFileFree(file); - continue; - } - - /* - * Add only files, directories and links. Skip sockets and other - * unexpected file formats. - */ - if (file->kind != PIO_KIND_DIRECTORY && file->kind != PIO_KIND_REGULAR) - { - elog(WARNING, "Skip '%s': unexpected file format", child); - pgFileFree(file); - continue; - } - - if(handle_tablespaces) { - /* - * Do not copy tablespaces twice. It may happen if the tablespace is located - * inside the PGDATA. 
- */ - if (file->kind == PIO_KIND_DIRECTORY && - strcmp(file->name, TABLESPACE_VERSION_DIRECTORY) == 0) - { - Oid tblspcOid; - char tmp_rel_path[MAXPGPATH]; - int sscanf_res; - - /* - * Valid path for the tablespace is - * pg_tblspc/tblsOid/TABLESPACE_VERSION_DIRECTORY - */ - if (!path_is_prefix_of_path(PG_TBLSPC_DIR, file->rel_path)) - continue; - sscanf_res = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%s", - &tblspcOid, tmp_rel_path); - if (sscanf_res == 0) - continue; - } - - if (in_tablespace) { - char tmp_rel_path[MAXPGPATH]; - ssize_t sscanf_res; - - sscanf_res = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%[^/]/%u/", - &(file->tblspcOid), tmp_rel_path, - &(file->dbOid)); - - /* - * We should skip other files and directories rather than - * TABLESPACE_VERSION_DIRECTORY, if this is recursive tablespace. - */ - if (sscanf_res == 2 && strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) != 0) - continue; - } else if (path_is_prefix_of_path("global", file->rel_path)) { - file->tblspcOid = GLOBALTABLESPACE_OID; - } else if (path_is_prefix_of_path("base", file->rel_path)) { - file->tblspcOid = DEFAULTTABLESPACE_OID; - sscanf(file->rel_path, "base/%u/", &(file->dbOid)); - } - } - - parray_append(files, file); - - /* - * If the entry is a directory call dir_list_file_internal() - * recursively. - */ - if (file->kind == PIO_KIND_DIRECTORY) - dir_list_file_internal(files, file, child, handle_tablespaces, follow_symlink, - backup_logs, skip_hidden, external_dir_num, drive); - } - - if (errno && errno != ENOENT) - { - int errno_tmp = errno; - closedir(dir); - elog(ERROR, "Cannot read directory \"%s\": %s", - parent_dir, strerror(errno_tmp)); - } - closedir(dir); -} - -/* - * To get the arrays of files we use the same function dir_list_file(), - * that is used for local backup. - * After that we iterate over arrays and for every file send at least - * two messages to main process: - * 1. rel_path - * 2. metainformation (size, mtime, etc) - * 3. 
link path (optional) - * - * TODO: replace FIO_SEND_FILE and FIO_SEND_FILE_EOF with dedicated messages - */ -static void -fio_list_dir_impl(int out, char* buf, pioDBDrive_i drive) -{ - int i; - fio_header hdr; - fio_list_dir_request *req = (fio_list_dir_request*) buf; - parray *file_files = parray_new(); - - /* - * Disable logging into console any messages with exception of ERROR messages, - * because currently we have no mechanism to notify the main process - * about then message been sent. - * TODO: correctly send elog messages from agent to main process. - */ - instance_config.logger.log_level_console = ERROR; - - dir_list_file(file_files, req->path, req->handle_tablespaces, - req->follow_symlink, req->backup_logs, req->skip_hidden, - req->external_dir_num, drive); - - /* send information about files to the main process */ - for (i = 0; i < parray_num(file_files); i++) - { - fio_pgFile fio_file; - pgFile *file = (pgFile *) parray_get(file_files, i); - - fio_file.kind = file->kind; - fio_file.mode = file->mode; - fio_file.size = file->size; - fio_file.mtime = file->mtime; - fio_file.is_datafile = file->is_datafile; - fio_file.tblspcOid = file->tblspcOid; - fio_file.dbOid = file->dbOid; - fio_file.relOid = file->relOid; - fio_file.forkName = file->forkName; - fio_file.segno = file->segno; - fio_file.external_dir_num = file->external_dir_num; - - if (file->linked) - fio_file.linked_len = strlen(file->linked) + 1; - else - fio_file.linked_len = 0; - - hdr.cop = FIO_SEND_FILE; - hdr.size = strlen(file->rel_path) + 1; - - /* send rel_path first */ - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(out, file->rel_path, hdr.size), hdr.size); - - /* now send file metainformation */ - IO_CHECK(fio_write_all(out, &fio_file, sizeof(fio_file)), sizeof(fio_file)); - - /* If file is a symlink, then send link path */ - if (file->linked) - IO_CHECK(fio_write_all(out, file->linked, fio_file.linked_len), fio_file.linked_len); - - 
pgFileFree(file); - } - - parray_free(file_files); - hdr = (fio_header){.cop = FIO_SEND_FILE_EOF}; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); -} PageState * fio_get_checksum_map(fio_location location, const char *fullpath, uint32 checksum_version, @@ -2515,9 +2220,6 @@ fio_communicate(int in, int out) case FIO_SEEK: /* Set current position in file */ fio_seek_impl(fd[hdr.handle], hdr.arg); break; - case FIO_LIST_DIR: - fio_list_dir_impl(out, buf, drive); - break; case FIO_REMOVE_DIR: fio_remove_dir_impl(out, buf); break; @@ -3306,16 +3008,6 @@ pioLocalDrive_pioMakeDir(VSelf, path_t path, mode_t mode, bool strict) return $syserr(errno, "Cannot make dir {path:q}", path(path)); } -static void -pioLocalDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_tablespaces, - bool follow_symlink, bool backup_logs, bool skip_hidden, - int external_dir_num) { - FOBJ_FUNC_ARP(); - Self(pioLocalDrive); - dir_list_file(files, root, handle_tablespaces, follow_symlink, backup_logs, - skip_hidden, external_dir_num, $bind(pioDBDrive, self)); -} - static pioDirIter_i pioLocalDrive_pioOpenDir(VSelf, path_t path, err_i* err) { @@ -4123,79 +3815,6 @@ pioRemoteDrive_pioMakeDir(VSelf, path_t path, mode_t mode, bool strict) return $syserr(hdr.arg, "Cannot make dir {path:q}", path(path)); } -static void -pioRemoteDrive_pioListDir(VSelf, parray *files, const char *root, bool handle_tablespaces, - bool follow_symlink, bool backup_logs, bool skip_hidden, - int external_dir_num) { - FOBJ_FUNC_ARP(); - fio_header hdr; - fio_list_dir_request req; - char *buf = pgut_malloc(CHUNK_SIZE); - - /* Send to the agent message with parameters for directory listing */ - memset(&req, 0, sizeof(req)); - snprintf(req.path, MAXPGPATH, "%s", root); - req.handle_tablespaces = handle_tablespaces; - req.follow_symlink = follow_symlink; - req.backup_logs = backup_logs; - req.skip_hidden = skip_hidden; - req.external_dir_num = external_dir_num; - - hdr = (fio_header){.cop = 
FIO_LIST_DIR, .size=sizeof(req)}; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, &req, hdr.size), hdr.size); - - for (;;) { - /* receive data */ - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (hdr.cop == FIO_SEND_FILE_EOF) { - /* the work is done */ - break; - } else if (hdr.cop == FIO_SEND_FILE) { - pgFile *file = NULL; - fio_pgFile fio_file; - - /* receive rel_path */ - IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - file = pgFileInit(buf); - - /* receive metainformation */ - IO_CHECK(fio_read_all(fio_stdin, &fio_file, sizeof(fio_file)), sizeof(fio_file)); - - file->kind = fio_file.kind; - file->mode = fio_file.mode; - file->size = fio_file.size; - file->mtime = fio_file.mtime; - file->is_datafile = fio_file.is_datafile; - file->tblspcOid = fio_file.tblspcOid; - file->dbOid = fio_file.dbOid; - file->relOid = fio_file.relOid; - file->forkName = fio_file.forkName; - file->segno = fio_file.segno; - file->external_dir_num = fio_file.external_dir_num; - - if (fio_file.linked_len > 0) { - IO_CHECK(fio_read_all(fio_stdin, buf, fio_file.linked_len), fio_file.linked_len); - - file->linked = pgut_malloc(fio_file.linked_len); - snprintf(file->linked, fio_file.linked_len, "%s", buf); - } - -// elog(INFO, "Received file: %s, mode: %u, size: %lu, mtime: %lu", -// file->rel_path, file->mode, file->size, file->mtime); - - parray_append(files, file); - } else { - /* TODO: fio_disconnect may get assert fail when running after this */ - elog(ERROR, "Remote agent returned message of unexpected type: %i", hdr.cop); - } - } - - pg_free(buf); -} - static pioDirIter_i pioRemoteDrive_pioOpenDir(VSelf, path_t path, err_i* err) { diff --git a/src/utils/file.h b/src/utils/file.h index aa487397d..12903e178 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -178,9 +178,6 @@ extern int fio_remove(fio_location location, const char* path, bool missing_ extern ssize_t 
fio_readlink(fio_location location, const char *path, char *value, size_t valsiz); extern pid_t fio_check_postmaster(fio_location location, const char *pgdata); -extern void db_list_dir(parray *files, const char *root, bool handle_tablespaces, - bool backup_logs, int external_dir_num); - extern PageState *fio_get_checksum_map(fio_location location, const char *fullpath, uint32 checksum_version, int n_blocks, XLogRecPtr dest_stop_lsn, BlockNumber segmentno); struct datapagemap; /* defined in datapagemap.h */ @@ -299,9 +296,6 @@ fobj_iface(pioPagesIterator); #define mth__pioMakeDir err_i, (path_t, path), (mode_t, mode), (bool, strict) #define mth__pioOpenDir pioDirIter_i, (path_t, path), (err_i*, err) #define mth__pioIsDirEmpty bool, (path_t, path), (err_i*, err) -#define mth__pioListDir void, (parray *, files), (const char *, root), \ - (bool, handle_tablespaces), (bool, symlink_and_hidden), \ - (bool, backup_logs), (bool, skip_hidden), (int, external_dir_num) #define mth__pioRemoveDir void, (const char *, root), (bool, root_as_well) /* pioReadFile and pioWriteFile should be used only for small files */ #define PIO_READ_WRITE_FILE_LIMIT (16*1024*1024) @@ -329,7 +323,6 @@ fobj_method(pioMakeDir); fobj_method(pioFilesAreSame); fobj_method(pioOpenDir); fobj_method(pioIsDirEmpty); -fobj_method(pioListDir); fobj_method(pioRemoveDir); fobj_method(pioReadFile); fobj_method(pioWriteFile); @@ -338,7 +331,7 @@ fobj_method(pioIteratePages); #define iface__pioDrive mth(pioOpenRead, pioOpenReadStream), \ mth(pioStat, pioRemove), \ mth(pioExists, pioGetCRC32, pioIsRemote), \ - mth(pioMakeDir, pioOpenDir, pioIsDirEmpty, pioListDir, pioRemoveDir), \ + mth(pioMakeDir, pioOpenDir, pioIsDirEmpty, pioRemoveDir), \ mth(pioFilesAreSame, pioReadFile, pioWriteFile), \ mth(pioOpenRewrite) fobj_iface(pioDrive); From 7d552c6046abbadf005961be0d1aa8d20a30876b Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 19:52:47 +0300 Subject: [PATCH 227/339] [PBCKP-416] pio_recursive_dir 
iterator. Iterator that could recursively walk through dir hierarchy and skip some subdirectories. --- src/utils/file.c | 132 +++++++++++++++++++++++++++++++++++++++++++++++ src/utils/file.h | 6 +++ 2 files changed, 138 insertions(+) diff --git a/src/utils/file.c b/src/utils/file.c index 6d8432d55..d7a20789b 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2544,6 +2544,138 @@ fio_communicate(int in, int out) } } + +typedef struct pio_recursive_dir { + pioDrive_i drive; + ft_arr_cstr_t recurse; + ft_str_t root; + ft_str_t parent; + pioDirIter_i iter; + pio_dirent_t dirent; + bool dont_recurse_current; + ft_strbuf_t namebuf; +} pioRecursiveDir; +#define kls__pioRecursiveDir mth(fobjDispose) +fobj_klass(pioRecursiveDir); + +pio_recursive_dir_t* +pio_recursive_dir_alloc(pioDrive_i drive, path_t root, err_i *err) +{ + pioDirIter_i iter; + fobj_reset_err(err); + + iter = $i(pioOpenDir, drive, root, err); + if ($haserr(*err)) + return NULL; + + return $alloc(pioRecursiveDir, .drive = drive, + .root = ft_strdupc(root), + .parent = ft_strdupc(""), + .iter = $iref(iter), + .recurse = ft_arr_init(), + .namebuf = ft_strbuf_zero()); +} + +static pio_dirent_t +pio_recursive_dir_next_impl(pio_recursive_dir_t* self, err_i* err) +{ + if (self->dirent.stat.pst_kind == PIO_KIND_DIRECTORY && + !self->dont_recurse_current) + { + ft_arr_cstr_push(&self->recurse, ft_strdup(self->dirent.name).ptr); + } + + ft_strbuf_reset_for_reuse(&self->namebuf); + self->dont_recurse_current = false; + + self->dirent = $i(pioDirNext, self->iter, .err = err); + if ($haserr(*err)) + return self->dirent; + + if (self->dirent.stat.pst_kind != PIO_KIND_UNKNOWN) + { + ft_strbuf_cat(&self->namebuf, self->parent); + ft_strbuf_cat_path(&self->namebuf, self->dirent.name); + self->dirent.name = ft_strbuf_ref(&self->namebuf); + return self->dirent; + } + + *err = $i(pioClose, self->iter); + $idel(&self->iter); + if ($haserr(*err)) + return self->dirent; + +next_dir: + if (self->recurse.len == 0) + return 
self->dirent; + + ft_str_free(&self->parent); + self->parent = ft_cstr(ft_arr_cstr_pop(&self->recurse)); + + ft_strbuf_cat(&self->namebuf, self->root); + ft_strbuf_cat_path(&self->namebuf, self->parent); + + self->iter = $i(pioOpenDir, self->drive, .path = self->namebuf.ptr, + .err = err); + if ($haserr(*err)) + { + /* someone deleted dir under our feet */ + if (getErrno(*err) == ENOENT) + { + *err = $noerr(); + goto next_dir; + } + + return self->dirent; + } + + $iref(self->iter); + + return pio_recursive_dir_next_impl(self, err); +} + +pio_dirent_t +pio_recursive_dir_next(pio_recursive_dir_t* self, err_i* err) +{ + FOBJ_FUNC_ARP(); + pio_dirent_t ent; + fobj_reset_err(err); + + ent = pio_recursive_dir_next_impl(self, err); + $iresult(*err); + return ent; +} + +void +pio_recursive_dir_dont_recurse_current(pio_recursive_dir_t* self) +{ + ft_assert(self->dirent.stat.pst_kind == PIO_KIND_DIRECTORY); + self->dont_recurse_current = true; +} + +static void +pioRecursiveDir_fobjDispose(VSelf) +{ + Self(pioRecursiveDir); + + if ($notNULL(self->iter)) + $i(pioClose, self->iter); + $idel(&self->iter); + ft_str_free(&self->root); + ft_str_free(&self->parent); + ft_arr_cstr_free(&self->recurse); + ft_strbuf_free(&self->namebuf); +} + +void +pio_recursive_dir_free(pio_recursive_dir_t* self) +{ + /* we are releasing bound resources, + * but self will be dealloced in FOBJ's ARP */ + pioRecursiveDir_fobjDispose(self); +} +fobj_klass_handle(pioRecursiveDir); + // CLASSES typedef struct pioLocalDrive diff --git a/src/utils/file.h b/src/utils/file.h index 12903e178..bfecea155 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -438,6 +438,12 @@ extern void init_pio_line_reader(pio_line_reader *r, pioRead_i source, size_t ma extern void deinit_pio_line_reader(pio_line_reader *r); extern ft_bytes_t pio_line_reader_getline(pio_line_reader *r, err_i *err); +typedef struct pio_recursive_dir pio_recursive_dir_t; +extern pio_recursive_dir_t* pio_recursive_dir_alloc(pioDrive_i drive, 
path_t root, err_i *err); +extern pio_dirent_t pio_recursive_dir_next(pio_recursive_dir_t* dir, err_i* err); +extern void pio_recursive_dir_dont_recurse_current(pio_recursive_dir_t* dir); +extern void pio_recursive_dir_free(pio_recursive_dir_t* dir); + /* append path component */ extern bool ft_strbuf_cat_path(ft_strbuf_t *buf, ft_str_t path); ft_inline bool From ff779a6e5f9bbbfce09275b638d1cbb7699979de Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 21:30:09 +0300 Subject: [PATCH 228/339] [PBCKP-416] rewrite db_list_dir with pio_recursive_dir --- src/dir.c | 139 ++++++++++++------------------------------------------ 1 file changed, 31 insertions(+), 108 deletions(-) diff --git a/src/dir.c b/src/dir.c index 90359e398..e30b1acd4 100644 --- a/src/dir.c +++ b/src/dir.c @@ -118,9 +118,6 @@ typedef struct TablespaceCreatedList static char dir_check_file(pgFile *file, bool backup_logs); -static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, - bool exclude, bool backup_logs, - int external_dir_num, fio_location location); static void opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, const char *type); static void cleanup_tablespace(const char *path); @@ -365,44 +362,6 @@ db_map_entry_free(void *entry) free(entry); } -/* - * List files and directories in the directory "root" and add - * pgFile objects to "files". 
- */ -void -db_list_dir(parray *files, const char *root, bool exclude, - bool backup_logs, int external_dir_num, - fio_location location) -{ - pgFile *file; - pioDrive_i drive = pioDriveForLocation(location); - - file = pgFileNew(root, "", true, external_dir_num, drive); - if (file == NULL) - { - /* For external directory this is not ok */ - if (external_dir_num > 0) - elog(ERROR, "External directory is not found: \"%s\"", root); - else - return; - } - - if (file->kind != PIO_KIND_DIRECTORY) - { - if (external_dir_num > 0) - elog(ERROR, " --external-dirs option \"%s\": directory or symbolic link expected", - root); - else - elog(WARNING, "Skip \"%s\": unexpected file format", root); - return; - } - - dir_list_file_internal(files, file, root, exclude, - backup_logs, external_dir_num, location); - - pgFileFree(file); -} - #define CHECK_FALSE 0 #define CHECK_TRUE 1 #define CHECK_EXCLUDE_FALSE 2 @@ -556,81 +515,54 @@ dir_check_file(pgFile *file, bool backup_logs) } /* - * List files in parent->path directory. If "exclude" is true do not add into - * "files" files from pgdata_exclude_files and directories from - * pgdata_exclude_dir. - * - * TODO: should we check for interrupt here ? + * List files and directories in the directory "root" and add + * pgFile objects to "files". 
*/ -static void -dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, - bool exclude, bool backup_logs, - int external_dir_num, fio_location location) +void +db_list_dir(parray *files, const char* root, + bool exclude, bool backup_logs, + int external_dir_num, fio_location location) { - DIR *dir; - struct dirent *dent; + FOBJ_FUNC_ARP(); + pio_dirent_t dent; pioDrive_i drive; - - if (parent->kind != PIO_KIND_DIRECTORY) - elog(ERROR, "\"%s\" is not a directory", parent_dir); + pio_recursive_dir_t* dir; + err_i err; drive = pioDriveForLocation(location); /* Open directory and list contents */ - dir = fio_opendir(location, parent_dir); + dir = pio_recursive_dir_alloc(drive, root, &err); if (dir == NULL) { - if (errno == ENOENT) + if (getErrno(err) == ENOENT && external_dir_num == 0) { /* Maybe the directory was removed */ + /* XXX: why the hell it is "ok" for non-external directories? */ return; } - elog(ERROR, "Cannot open directory \"%s\": %s", - parent_dir, strerror(errno)); + if (getErrno(err) == ENOTDIR && external_dir_num != 0) + { + elog(ERROR, " --external-dirs option \"%s\": directory or symbolic link expected", + root); + } + ft_logerr(FT_FATAL, $errmsg(err), "Listing directory"); } - errno = 0; - while ((dent = fio_readdir(dir))) + while ((dent = pio_recursive_dir_next(dir, &err)).stat.pst_kind) { pgFile *file; char child[MAXPGPATH]; - char rel_child[MAXPGPATH]; char check_res; - join_path_components(child, parent_dir, dent->d_name); - join_path_components(rel_child, parent->rel_path, dent->d_name); + join_path_components(child, root, dent.name.ptr); - file = pgFileNew(child, rel_child, true, external_dir_num, + file = pgFileNew(child, dent.name.ptr, true, external_dir_num, drive); if (file == NULL) continue; - - /* Skip entries point current dir or parent dir */ - if (file->kind == PIO_KIND_DIRECTORY && - (strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0)) - { - pgFileFree(file); - continue; - } - - /* skip hidden 
files and directories */ - if (file->name[0] == '.') - { - elog(WARNING, "Skip hidden file: '%s'", child); - pgFileFree(file); - continue; - } - - /* - * Add only files, directories and links. Skip sockets and other - * unexpected file formats. - */ - if (file->kind != PIO_KIND_DIRECTORY && file->kind != PIO_KIND_REGULAR) - { - elog(WARNING, "Skip '%s': unexpected file format", child); - pgFileFree(file); - continue; - } + /* pioDirIter will not return other kinds of entries */ + ft_assert(file->kind == PIO_KIND_REGULAR || file->kind == PIO_KIND_DIRECTORY); if (exclude) { @@ -638,11 +570,15 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, if (check_res == CHECK_FALSE) { /* Skip */ + if (file->kind == PIO_KIND_DIRECTORY) + pio_recursive_dir_dont_recurse_current(dir); pgFileFree(file); continue; } else if (check_res == CHECK_EXCLUDE_FALSE) { + ft_assert(file->kind == PIO_KIND_DIRECTORY); + pio_recursive_dir_dont_recurse_current(dir); /* We add the directory itself which content was excluded */ parray_append(files, file); continue; @@ -650,24 +586,11 @@ dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, } parray_append(files, file); - - /* - * If the entry is a directory call dir_list_file_internal() - * recursively. 
- */ - if (file->kind == PIO_KIND_DIRECTORY) - dir_list_file_internal(files, file, child, exclude, - backup_logs, external_dir_num, location); } - if (errno && errno != ENOENT) - { - int errno_tmp = errno; - fio_closedir(dir); - elog(ERROR, "Cannot read directory \"%s\": %s", - parent_dir, strerror(errno_tmp)); - } - fio_closedir(dir); + pio_recursive_dir_free(dir); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Listing directory \"%s\"", root); } /* From 19c33c92406b220be0d01f8802f531521e37d613 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 21:31:22 +0300 Subject: [PATCH 229/339] [PBCKP-416] fix test_basic_missing_dir_permissions --- tests/backup_test.py | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/tests/backup_test.py b/tests/backup_test.py index da86ce725..16ac0cab5 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -1737,22 +1737,10 @@ def test_basic_missing_dir_permissions(self): os.chmod(full_path, 000) - try: + with self.assertRaisesRegex(ProbackupException, r'ERROR:[^\n]*Cannot open dir'): # FULL backup self.backup_node( backup_dir, 'node', node, options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing permissions" - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: Cannot open directory', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) os.rmdir(full_path) From 07137a715b36a542fe57e31f1a994da3a41e6aad Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 17 Dec 2022 15:38:05 +0300 Subject: [PATCH 230/339] [PBCKP-416] remove fio_opendir/fio_readdir/fio_closedir --- src/utils/file.c | 113 ----------------------------------------------- src/utils/file.h | 8 ---- 2 files changed, 121 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 
d7a20789b..aaffbd03c 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -412,93 +412,6 @@ pio_limit_mode(mode_t mode) return mode; } -/* Open directory */ -DIR* -fio_opendir(fio_location location, const char* path) -{ - DIR* dir; - if (fio_is_remote(location)) - { - int handle; - fio_header hdr; - - handle = find_free_handle(); - hdr.cop = FIO_OPENDIR; - hdr.handle = handle; - hdr.size = strlen(path) + 1; - set_handle(handle); - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); - - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (hdr.arg != 0) - { - errno = hdr.arg; - unset_handle(hdr.handle); - return NULL; - } - dir = (DIR*)(size_t)(handle + 1); - } - else - { - dir = opendir(path); - } - return dir; -} - -/* Get next directory entry */ -struct dirent* -fio_readdir(DIR *dir) -{ - if (fio_is_remote_file((FILE*)dir)) - { - fio_header hdr; - static __thread struct dirent entry; - - hdr.cop = FIO_READDIR; - hdr.handle = (size_t)dir - 1; - hdr.size = 0; - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - Assert(hdr.cop == FIO_SEND); - if (hdr.size) { - Assert(hdr.size == sizeof(entry)); - IO_CHECK(fio_read_all(fio_stdin, &entry, sizeof(entry)), sizeof(entry)); - } - - return hdr.size ? &entry : NULL; - } - else - { - return readdir(dir); - } -} - -/* Close directory */ -int -fio_closedir(DIR *dir) -{ - if (fio_is_remote_file((FILE*)dir)) - { - fio_header hdr; - hdr.cop = FIO_CLOSEDIR; - hdr.handle = (size_t)dir - 1; - hdr.size = 0; - unset_handle(hdr.handle); - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - return 0; - } - else - { - return closedir(dir); - } -} - - /* Close ssh session */ void fio_disconnect(void) @@ -2044,12 +1957,10 @@ fio_communicate(int in, int out) * can use the same index at both sides. 
*/ int fd[FIO_FDMAX]; - DIR* dir[FIO_FDMAX]; fobj_t objs[FIO_FDMAX] = {0}; err_i async_errs[FIO_FDMAX] = {0}; - struct dirent* entry; size_t buf_size = 128*1024; char* buf = (char*)pgut_malloc(buf_size); fio_header hdr; @@ -2085,30 +1996,6 @@ fio_communicate(int in, int out) } errno = 0; /* reset errno */ switch (hdr.cop) { - case FIO_OPENDIR: /* Open directory for traversal */ - dir[hdr.handle] = opendir(buf); - hdr.arg = dir[hdr.handle] == NULL ? errno : 0; - hdr.size = 0; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - break; - case FIO_READDIR: /* Get next directory entry */ - hdr.cop = FIO_SEND; - entry = readdir(dir[hdr.handle]); - if (entry != NULL) - { - hdr.size = sizeof(*entry); - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(out, entry, hdr.size), hdr.size); - } - else - { - hdr.size = 0; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - } - break; - case FIO_CLOSEDIR: /* Finish directory traversal */ - SYS_CHECK(closedir(dir[hdr.handle])); - break; case FIO_OPEN: /* Open file */ fd[hdr.handle] = open(buf, hdr.arg, FILE_PERMISSION); hdr.arg = fd[hdr.handle] < 0 ? 
errno : 0; diff --git a/src/utils/file.h b/src/utils/file.h index bfecea155..8a2fe1c70 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -36,9 +36,6 @@ typedef enum FIO_READ, FIO_STAT, FIO_SEND, - FIO_OPENDIR, - FIO_READDIR, - FIO_CLOSEDIR, FIO_PAGE, FIO_GET_CRC32, /* used for incremental restore */ @@ -159,11 +156,6 @@ extern void fio_error(int rc, int size, const char* file, int line); extern void fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size); -/* DIR-style functions */ -extern DIR* fio_opendir(fio_location location, const char* path); -extern struct dirent * fio_readdir(DIR *dirp); -extern int fio_closedir(DIR *dirp); - /* pathname-style functions */ extern int fio_sync(fio_location location, const char* path); extern pg_crc32 From fd3fba7894eebd899e2c98c5cf2871d295624141 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 22:50:11 +0300 Subject: [PATCH 231/339] [PBCKP-416] db_list_dir: reuse stat from dir iteration --- src/dir.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/src/dir.c b/src/dir.c index e30b1acd4..bfc043118 100644 --- a/src/dir.c +++ b/src/dir.c @@ -132,6 +132,15 @@ static TablespaceList tablespace_dirs = {NULL, NULL}; /* Extra directories mapping */ static TablespaceList external_remap_list = {NULL, NULL}; +static void +pgFileSetStat(pgFile* file, pio_stat_t st) +{ + file->size = st.pst_size; + file->kind = st.pst_kind; + file->mode = st.pst_mode; + file->mtime = st.pst_mtime; +} + pgFile * pgFileNew(const char *path, const char *rel_path, bool follow_symlink, int external_dir_num, pioDrive_i drive) @@ -152,10 +161,7 @@ pgFileNew(const char *path, const char *rel_path, bool follow_symlink, } file = pgFileInit(rel_path); - file->size = st.pst_size; - file->kind = st.pst_kind; - file->mode = st.pst_mode; - file->mtime = st.pst_mtime; + pgFileSetStat(file, st); file->external_dir_num = external_dir_num; return file; @@ -557,10 +563,10 @@ 
db_list_dir(parray *files, const char* root, join_path_components(child, root, dent.name.ptr); - file = pgFileNew(child, dent.name.ptr, true, external_dir_num, - drive); - if (file == NULL) - continue; + file = pgFileInit(dent.name.ptr); + pgFileSetStat(file, dent.stat); + file->external_dir_num = external_dir_num; + /* pioDirIter will not return other kinds of entries */ ft_assert(file->kind == PIO_KIND_REGULAR || file->kind == PIO_KIND_DIRECTORY); From b4d2d821f25a655476a9525813b73e1580ef7238 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 23:16:01 +0300 Subject: [PATCH 232/339] ft_strbuf_cat_zt - fix for empty string --- src/fu_util/impl/ft_impl.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index 9cf4a06cc..2ffcd9554 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -570,6 +570,8 @@ ft_strbuf_cat(ft_strbuf_t *buf, ft_str_t s) { ft_inline bool ft_strbuf_cat_zt(ft_strbuf_t *buf, ft_str_t s) { /* we could actually reuse ft_strbuf_catbytes */ + if (s.len == 0) + return ft_strbuf_cat1(buf, 0); return ft_strbuf_catbytes(buf, ft_bytes(s.ptr, s.len+1)); } From 92495b85d29a6575ac2077490695f7295f558d84 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 18 Dec 2022 23:19:46 +0300 Subject: [PATCH 233/339] [PBCKP-416] PIO_DIR_NEXT: abuse empty entry as stop signal. 
--- src/utils/file.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index aaffbd03c..0c5f274c7 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2346,15 +2346,16 @@ fio_communicate(int in, int out) pio_dirent_t dirent; int n; - for (n = 0; n < PIO_DIR_REMOTE_BATCH; n++) + for (n = 0; n < PIO_DIR_REMOTE_BATCH;) { dirent = $(pioDirNext, objs[hdr.handle], .err = &err); if ($haserr(err)) break; - if (dirent.stat.pst_kind == PIO_KIND_UNKNOWN) - break; ft_strbuf_catbytes(&stats, FT_BYTES_FOR(dirent.stat)); ft_strbuf_cat_zt(&names, dirent.name); + n++; + if (dirent.stat.pst_kind == PIO_KIND_UNKNOWN) + break; } if ($haserr(err)) From 719cce1b1b0ee62d21b78d77920420dd51bd4687 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 19 Dec 2022 02:22:23 +0300 Subject: [PATCH 234/339] ft_str_chop1 - cut 1 byte from string (+ fix ft_str_ends_withc return type) --- src/fu_util/ft_util.h | 4 +++- src/fu_util/impl/ft_impl.h | 9 ++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/fu_util/ft_util.h b/src/fu_util/ft_util.h index e4778ecf2..fa37fbb69 100644 --- a/src/fu_util/ft_util.h +++ b/src/fu_util/ft_util.h @@ -427,9 +427,11 @@ ft_inline FT_CMP_RES ft_strcmpc(ft_str_t str, const char* oth); ft_inline void ft_str_consume(ft_str_t *str, size_t cut); ft_inline size_t ft_str_spnc(ft_str_t str, const char* chars); -ft_inline size_t ft_str_ends_withc(ft_str_t str, const char* needle); +ft_inline bool ft_str_ends_withc(ft_str_t str, const char* needle); ft_inline size_t ft_str_find_cstr(ft_str_t haystack, const char *needle); +ft_inline void ft_str_chop1(ft_str_t *str); + /* shift zero-terminated string. 
Will assert if no zero-byte found and it is not last */ extern ft_str_t ft_bytes_shift_zt(ft_bytes_t *bytes); diff --git a/src/fu_util/impl/ft_impl.h b/src/fu_util/impl/ft_impl.h index 2ffcd9554..096093f71 100644 --- a/src/fu_util/impl/ft_impl.h +++ b/src/fu_util/impl/ft_impl.h @@ -494,7 +494,7 @@ ft_str_spnc(ft_str_t str, const char* chars) { return ft_bytes_spnc(ft_str2bytes(str), chars); } -ft_inline size_t +ft_inline bool ft_str_ends_withc(ft_str_t str, const char* needle) { return ft_bytes_ends_withc(ft_str2bytes(str), needle); } @@ -504,6 +504,13 @@ ft_str_find_cstr(ft_str_t haystack, const char *needle) { return ft_bytes_find_cstr(ft_str2bytes(haystack), needle); } +ft_inline void +ft_str_chop1(ft_str_t *str) { + ft_assert(str->len >= 1); + str->ptr[str->len-1] = 0; + str->len--; +} + ft_inline ft_strbuf_t ft_strbuf_zero(void) { return (ft_strbuf_t){.ptr = "", .len = 0, .cap = 0}; From 43d48110c3f066695cf2001cefa7c97f5c27be76 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 19 Dec 2022 03:11:42 +0300 Subject: [PATCH 235/339] fix test_no_control_file --- src/catalog.c | 12 +++++++++++- src/configure.c | 25 ++++++++++++++----------- src/pg_probackup.c | 9 ++++++++- src/pg_probackup.h | 2 +- src/utils/file.c | 21 +++++++-------------- src/validate.c | 6 +++++- 6 files changed, 46 insertions(+), 29 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 32b977779..fbf5db766 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -2620,6 +2620,7 @@ readBackupControlFile(pioDrive_i drive, const char *path) char *server_version = NULL; char *compress_alg = NULL; int parsed_options; + err_i err; ConfigOption options[] = { @@ -2658,7 +2659,16 @@ readBackupControlFile(pioDrive_i drive, const char *path) pgBackupInit(backup, drive); - parsed_options = config_read_opt(drive, path, options, WARNING, true, false); + parsed_options = config_read_opt(drive, path, options, WARNING, true, &err); + + if (getErrno(err) == ENOENT) + { + elog(WARNING, "Control file \"%s\" 
doesn't exist", path); + pgBackupFree(backup); + return NULL; + } + else if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Control file"); if (parsed_options == 0) { diff --git a/src/configure.c b/src/configure.c index 5a181f2ae..d338d5ea1 100644 --- a/src/configure.c +++ b/src/configure.c @@ -289,25 +289,19 @@ do_show_config(void) */ int config_read_opt(pioDrive_i drive, const char *path, ConfigOption options[], int elevel, - bool strict, bool missing_ok) + bool strict, err_i *err) { int parsed_options = 0; - err_i err = $noerr(); ft_bytes_t config_file = {0}; + fobj_reset_err(err); if (!options) return parsed_options; config_file = $i(pioReadFile, drive, .path = path, .binary = false, - .err = &err); - if ($haserr(err)) - { - if (missing_ok && getErrno(err) == ENOENT) - return 0; - - ft_logerr(FT_FATAL, $errmsg(err), "could not read file"); + .err = err); + if ($haserr(*err)) return 0; - } parsed_options = config_parse_opt(config_file, path, options, elevel, strict); @@ -410,6 +404,7 @@ readInstanceConfigFile(InstanceState *instanceState) char *log_format_file = NULL; char *compress_alg = NULL; int parsed_options; + err_i err; ConfigOption instance_options[] = { @@ -607,7 +602,15 @@ readInstanceConfigFile(InstanceState *instanceState) init_config(instance, instanceState->instance_name); parsed_options = config_read_opt(instanceState->backup_location, instanceState->instance_config_path, - instance_options, WARNING, true, true); + instance_options, WARNING, true, &err); + if (getErrno(err) == ENOENT) + { + elog(WARNING, "Control file \"%s\" doesn't exist", instanceState->instance_config_path); + pfree(instance); + return NULL; + } + else if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Control file"); if (parsed_options == 0) { diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 4e2c9b6f6..316bde2fb 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -566,7 +566,14 @@ main(int argc, char *argv[]) backup_subcmd != ARCHIVE_GET_CMD) { 
config_read_opt(instanceState->backup_location, instanceState->instance_config_path, - instance_options, ERROR, true, backup_subcmd == CHECKDB_CMD); + instance_options, ERROR, true, &err); + + if (getErrno(err) == ENOENT && backup_subcmd == CHECKDB_CMD) + { + /* ignore */ + } + else if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Reading instance control"); /* * We can determine our location only after reading the configuration file, diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 6432b794d..9eaab7986 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -841,7 +841,7 @@ extern void do_archive_get(InstanceState *instanceState, InstanceConfig *instanc /* in configure.c */ extern int config_read_opt(pioDrive_i drive, const char *path, ConfigOption options[], int elevel, - bool strict, bool missing_ok); + bool strict, err_i *err); extern void do_show_config(void); extern void do_set_config(InstanceState *instanceState); extern void init_config(InstanceConfig *config, const char *instance_name); diff --git a/src/utils/file.c b/src/utils/file.c index 0c5f274c7..66b9ab62d 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2055,11 +2055,7 @@ fio_communicate(int in, int out) .binary = hdr.arg != 0, .err = &err); if ($haserr(err)) { - const char *msg = $errmsg(err); - hdr.arg = getErrno(err); - hdr.size = strlen(msg) + 1; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(out, msg, hdr.size), hdr.size); + fio_send_pio_err(out, err); } else { @@ -3914,7 +3910,6 @@ pioRemoteDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { static ft_bytes_t pioRemoteDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) { - FOBJ_FUNC_ARP(); Self(pioRemoteDrive); ft_bytes_t res; @@ -3934,19 +3929,17 @@ pioRemoteDrive_pioReadFile(VSelf, path_t path, bool binary, err_i* err) /* get the response */ IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + if (hdr.cop == FIO_PIO_ERROR) + { + *err 
= fio_receive_pio_err(&hdr); + return ft_bytes(NULL, 0); + } + Assert(hdr.cop == FIO_READ_FILE_AT_ONCE); res = ft_bytes_alloc(hdr.size); IO_CHECK(fio_read_all(fio_stdin, res.ptr, hdr.size), hdr.size); - if (hdr.arg != 0) - { - *err = $syserr((int)hdr.arg, "Could not read remote file {path:q}: {causeStr}", - path(path), causeStr(res.ptr)); - $iresult(*err); - ft_bytes_free(&res); - } - return res; } diff --git a/src/validate.c b/src/validate.c index 680ebd766..d7399826f 100644 --- a/src/validate.c +++ b/src/validate.c @@ -398,6 +398,7 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) { corrupted_backup_found = false; skipped_due_to_lock = false; + err_i err; if (instanceState == NULL) { @@ -413,6 +414,7 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) errno = 0; while ((dent = readdir(dir))) { + FOBJ_LOOP_ARP(); char child[MAXPGPATH]; struct stat st; InstanceState *instanceState; @@ -437,8 +439,10 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) instanceState = makeInstanceState(catalogState, dent->d_name); if (config_read_opt(catalogState->backup_location, instanceState->instance_config_path, - instance_options, ERROR, false, true) == 0) + instance_options, ERROR, false, &err) == 0) { + if ($haserr(err) && getErrno(err) != ENOENT) + ft_logerr(FT_FATAL, $errmsg(err), ""); elog(WARNING, "Configuration file \"%s\" is empty", instanceState->instance_config_path); corrupted_backup_found = true; continue; From e86535e6a0c539d7c4967d445179f4bba283bd86 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 19 Dec 2022 03:46:58 +0300 Subject: [PATCH 236/339] more remote connection ensuring --- src/utils/file.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/utils/file.c b/src/utils/file.c index 66b9ab62d..5088ac9d4 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3626,6 +3626,7 @@ pioRemoteDrive_pioOpenRead(VSelf, path_t path, err_i *err) fio_header hdr; 
fobj_reset_err(err); fobj_t file; + fio_ensure_remote(); handle = find_free_handle(); @@ -3683,6 +3684,7 @@ pioRemoteDrive_pioStat(VSelf, path_t path, bool follow_symlink, err_i *err) .arg = follow_symlink, }; fobj_reset_err(err); + fio_ensure_remote(); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); @@ -3712,6 +3714,7 @@ pioRemoteDrive_pioFilesAreSame(VSelf, path_t file1, path_t file2) ft_strbuf_catc_zt(&buf, file1); ft_strbuf_catc_zt(&buf, file2); hdr.size = buf.len + 1; + fio_ensure_remote(); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, buf.ptr, buf.len+1), buf.len+1); @@ -3735,6 +3738,7 @@ pioRemoteDrive_pioRemove(VSelf, path_t path, bool missing_ok) .size = strlen(path) + 1, .arg = missing_ok ? 1 : 0, }; + fio_ensure_remote(); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); @@ -3761,6 +3765,7 @@ pioRemoteDrive_pioRename(VSelf, path_t old_path, path_t new_path) .size = old_path_len + new_path_len, .arg = 0, }; + fio_ensure_remote(); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, old_path, old_path_len), old_path_len); @@ -3784,6 +3789,7 @@ pioRemoteDrive_pioGetCRC32(VSelf, path_t path, bool compressed, err_i *err) size_t path_len = strlen(path) + 1; pg_crc32 crc = 0; fobj_reset_err(err); + fio_ensure_remote(); hdr.cop = FIO_GET_CRC32; hdr.handle = -1; @@ -3817,6 +3823,7 @@ pioRemoteDrive_pioMakeDir(VSelf, path_t path, mode_t mode, bool strict) .size = strlen(path) + 1, .arg = mode, }; + fio_ensure_remote(); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); @@ -3841,6 +3848,7 @@ pioRemoteDrive_pioOpenDir(VSelf, path_t path, err_i* err) .size = strlen(path)+1, }; fobj_reset_err(err); + 
fio_ensure_remote(); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); @@ -3869,6 +3877,7 @@ pioRemoteDrive_pioIsDirEmpty(VSelf, path_t path, err_i* err) .size = strlen(path)+1, }; fobj_reset_err(err); + fio_ensure_remote(); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, path, hdr.size), hdr.size); @@ -3888,6 +3897,7 @@ pioRemoteDrive_pioRemoveDir(VSelf, const char *root, bool root_as_well) { FOBJ_FUNC_ARP(); fio_header hdr; fio_remove_dir_request req; + fio_ensure_remote(); /* Send to the agent message with parameters for directory listing */ snprintf(req.path, MAXPGPATH, "%s", root); From 81b8d0c23edbf9c71f9eff400d425965a113fbad Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 19 Dec 2022 04:57:44 +0300 Subject: [PATCH 237/339] fix ft_strbuf_cat_path --- src/utils/file.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/utils/file.c b/src/utils/file.c index 5088ac9d4..f8aa4d843 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5971,6 +5971,9 @@ skip_drive(const char *path) bool ft_strbuf_cat_path(ft_strbuf_t *buf, ft_str_t path) { + if (path.len == 0) + return true; + /* here we repeat join_path_components */ if (buf->len > 0 && !IS_DIR_SEP(buf->ptr[buf->len-1])) { From 50b262fbfed165cebb5577017bc17174c08bd4d1 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 19 Dec 2022 05:16:02 +0300 Subject: [PATCH 238/339] make do_add_instance more pio friendly --- src/init.c | 53 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/src/init.c b/src/init.c index 6c7439eef..e304a4415 100644 --- a/src/init.c +++ b/src/init.c @@ -71,6 +71,8 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) struct stat st; CatalogState *catalogState = instanceState->catalog_state; err_i err; + bool exists; + int i; /* PGDATA is always required */ 
if (instance->pgdata == NULL) @@ -84,27 +86,38 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) /* Ensure that all root directories already exist */ /* TODO maybe call do_init() here instead of error?*/ - if (access(catalogState->catalog_path, F_OK) != 0) - elog(ERROR, "Directory does not exist: '%s'", catalogState->catalog_path); - - if (access(catalogState->backup_subdir_path, F_OK) != 0) - elog(ERROR, "Directory does not exist: '%s'", catalogState->backup_subdir_path); - - if (access(catalogState->wal_subdir_path, F_OK) != 0) - elog(ERROR, "Directory does not exist: '%s'", catalogState->wal_subdir_path); - - if (stat(instanceState->instance_backup_subdir_path, &st) == 0 && S_ISDIR(st.st_mode)) - elog(ERROR, "Instance '%s' backup directory already exists: '%s'", - instanceState->instance_name, instanceState->instance_backup_subdir_path); + { + const char *paths[] = { + catalogState->catalog_path, + catalogState->backup_subdir_path, + catalogState->wal_subdir_path}; + for (i = 0; i < ft_arrsz(paths); i++) + { + exists = $i(pioExists, catalogState->backup_location, .path = paths[i], + .expected_kind = PIO_KIND_DIRECTORY, .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Check instance"); + if (!exists) + elog(ERROR, "Directory does not exist: '%s'", paths); + } + } - /* - * Create directory for wal files of this specific instance. - * Existence check is extra paranoid because if we don't have such a - * directory in data dir, we shouldn't have it in wal as well. 
- */ - if (stat(instanceState->instance_wal_subdir_path, &st) == 0 && S_ISDIR(st.st_mode)) - elog(ERROR, "Instance '%s' WAL archive directory already exists: '%s'", - instanceState->instance_name, instanceState->instance_wal_subdir_path); + { + const char *paths[][2] = { + {"backup", instanceState->instance_backup_subdir_path}, + {"WAL", instanceState->instance_wal_subdir_path}, + }; + for (i = 0; i < ft_arrsz(paths); i++) + { + exists = !$i(pioIsDirEmpty, catalogState->backup_location, .path = paths[i][1], + .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Check instance"); + if (exists) + elog(ERROR, "Instance '%s' %s directory alredy exists: '%s'", + instanceState->instance_name, paths[i][0], paths[i][1]); + } + } /* Create directory for data files of this specific instance */ err = $i(pioMakeDir, backup_location, .path = instanceState->instance_backup_subdir_path, From 39de9b8b182582e7092b48bc3cea349d52b79f38 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 19 Dec 2022 05:22:58 +0300 Subject: [PATCH 239/339] use pioExists(expected_kind = PIO_KIND_DIRECTORY) to check instance path --- src/pg_probackup.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 316bde2fb..93de27ab3 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -526,11 +526,9 @@ main(int argc, char *argv[]) if (backup_subcmd != INIT_CMD && backup_subcmd != ADD_INSTANCE_CMD && backup_subcmd != ARCHIVE_GET_CMD) { - pio_stat_t st; - - st = $i(pioStat, catalogState->backup_location, + bool exists = $i(pioExists, catalogState->backup_location, .path = instanceState->instance_backup_subdir_path, - .follow_symlink = true, + .expected_kind = PIO_KIND_DIRECTORY, .err = &err); if ($haserr(err)) @@ -542,12 +540,8 @@ main(int argc, char *argv[]) elog(ERROR, "Instance '%s' does not exist in this backup catalog", instance_name); } - else - { - /* Ensure that backup_path is a path to a directory */ - 
if (st.pst_kind != PIO_KIND_DIRECTORY) - elog(ERROR, "-B, --backup-path must be a path to directory"); - } + if (!exists) + elog(ERROR, "-B, --backup-path must be a path to directory"); } } From 44c0d3874ae57179b78971c04d6c03fc35784a31 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 19 Dec 2022 05:24:46 +0300 Subject: [PATCH 240/339] fixup "make do_add_instance more pio friendly" --- src/init.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/init.c b/src/init.c index e304a4415..638096af4 100644 --- a/src/init.c +++ b/src/init.c @@ -68,7 +68,6 @@ int do_add_instance(InstanceState *instanceState, InstanceConfig *instance) { pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); - struct stat st; CatalogState *catalogState = instanceState->catalog_state; err_i err; bool exists; @@ -98,7 +97,7 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Check instance"); if (!exists) - elog(ERROR, "Directory does not exist: '%s'", paths); + elog(ERROR, "Directory does not exist: '%s'", paths[i]); } } From e43dfb47a2cbb8994030814b65c679af6f3857e3 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 19 Dec 2022 06:01:57 +0300 Subject: [PATCH 241/339] do_init: use pioIsDirEmpty for existance check --- src/init.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/init.c b/src/init.c index 638096af4..bd9a857f7 100644 --- a/src/init.c +++ b/src/init.c @@ -19,19 +19,16 @@ int do_init(CatalogState *catalogState) { pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); - int results; + bool empty; err_i err; - results = pg_check_dir(catalogState->catalog_path); + empty = $i(pioIsDirEmpty, backup_location,.path = catalogState->catalog_path, + .err = &err); - if (results == 4) /* exists and not empty*/ + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "cannot open backup catalog directory"); + if (!empty) elog(ERROR, 
"backup catalog already exist and it's not empty"); - else if (results == -1) /*trouble accessing directory*/ - { - int errno_tmp = errno; - elog(ERROR, "cannot open backup catalog directory \"%s\": %s", - catalogState->catalog_path, strerror(errno_tmp)); - } /* create backup catalog root directory */ err = $i(pioMakeDir, backup_location, .path = catalogState->catalog_path, From 2b11c59e48f9c90c6b374fec77555314e24d4103 Mon Sep 17 00:00:00 2001 From: Daniil Shelepanov Date: Tue, 20 Dec 2022 00:16:01 +0300 Subject: [PATCH 242/339] [PBCKP-318] rewrite backup_non_data_file to pio. --- src/data.c | 115 ++++++++++++++++++++++++++++------------------- src/utils/file.c | 93 ++++++++++++++++++++++++++++++++++++++ src/utils/file.h | 8 ++++ 3 files changed, 170 insertions(+), 46 deletions(-) diff --git a/src/data.c b/src/data.c index 35eece9f4..356b2b0d3 100644 --- a/src/data.c +++ b/src/data.c @@ -71,6 +71,8 @@ static size_t restore_data_file_internal(pioReader_i in, pioDBWriter_i out, pgFi datapagemap_t *map, PageState *checksum_map, int checksum_version, datapagemap_t *lsn_map, BackupPageHeader2 *headers); +static err_i send_file(const char *to_fullpath, const char *from_path, bool cut_zero_tail, pgFile *file); + #ifdef HAVE_LIBZ /* Implementation of zlib compression method */ static int32 @@ -1153,11 +1155,10 @@ backup_non_data_file_internal(const char *from_fullpath, const char *to_fullpath, pgFile *file, bool missing_ok) { - FILE *out = NULL; - char *errmsg = NULL; - int rc; bool cut_zero_tail; + err_i err; + FOBJ_FUNC_ARP(); cut_zero_tail = file->forkName == cfm; INIT_CRC32C(file->crc); @@ -1167,57 +1168,79 @@ backup_non_data_file_internal(const char *from_fullpath, file->write_size = 0; file->uncompressed_size = 0; - /* open backup file for write */ - out = fopen(to_fullpath, PG_BINARY_W); - if (out == NULL) - elog(ERROR, "Cannot open destination file \"%s\": %s", - to_fullpath, strerror(errno)); - - /* update file permission */ - if (chmod(to_fullpath, file->mode) == 
-1) - elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath, - strerror(errno)); - - /* backup remote file */ - if (fio_is_remote(FIO_DB_HOST)) - rc = fio_send_file(from_fullpath, out, cut_zero_tail, file, &errmsg); - else - rc = fio_send_file_local(from_fullpath, out, cut_zero_tail, file, &errmsg); + /* backup non-data file */ + err = send_file(to_fullpath, from_fullpath, cut_zero_tail, file); /* handle errors */ - if (rc == FILE_MISSING) - { - /* maybe deleted, it's not error in case of backup */ - if (missing_ok) - { - elog(LOG, "File \"%s\" is not found", from_fullpath); - file->write_size = FILE_NOT_FOUND; - goto cleanup; - } - else - elog(ERROR, "File \"%s\" is not found", from_fullpath); - } - else if (rc == WRITE_FAILED) - elog(ERROR, "Cannot write to \"%s\": %s", to_fullpath, strerror(errno)); - else if (rc != SEND_OK) - { - if (errmsg) - elog(ERROR, "%s", errmsg); - else - elog(ERROR, "Cannot access remote file \"%s\"", from_fullpath); + if($haserr(err)) { + if(getErrno(err) == ENOENT) { + if(missing_ok) { + elog(LOG, "File \"%s\" is not found", from_fullpath); + file->write_size = FILE_NOT_FOUND; + return; + } else + elog(ERROR, "File \"%s\" is not found", from_fullpath); + } else + elog(ERROR, "An error occured while copying %s: %s", + from_fullpath, $errmsg(err)); } file->uncompressed_size = file->read_size; +} -cleanup: - if (errmsg != NULL) - pg_free(errmsg); +static err_i +send_file(const char *to_fullpath, const char *from_fullpath, bool cut_zero_tail, pgFile *file) { + FOBJ_FUNC_ARP(); + err_i err = $noerr(); + pioReadStream_i in; + pioWriteCloser_i out; + pioDrive_i backup_drive = pioDriveForLocation(FIO_BACKUP_HOST); + pioDrive_i db_drive = pioDriveForLocation(FIO_DB_HOST); + + /* open to_fullpath */ + out = $i(pioOpenRewrite, backup_drive, .path = to_fullpath, + .permissions = file->mode, .err = &err); + + if($haserr(err)) + elog(ERROR, "Cannot open destination file \"%s\": %s", + to_fullpath, $errmsg(err)); - /* finish CRC calculation 
and store into pgFile */ - FIN_CRC32C(file->crc); + /* open from_fullpath */ + in = $i(pioOpenReadStream, db_drive, .path = from_fullpath, .err = &err); + + if($haserr(err)) + goto cleanup; + + /* + * Copy content and calc CRC as it gets copied. Optionally pioZeroTail + * will be used. + */ + pioCRC32Counter *c = pioCRC32Counter_alloc(); + pioZeroTail *zt = pioZeroTail_alloc(); + pioFilter_i ztFlt = bind_pioFilter(zt); + pioFilter_i crcFlt = bind_pioFilter(c); + pioFilter_i fltrs[] = { ztFlt, crcFlt }; + + err = pioCopyWithFilters($reduce(pioWriteFlush, out), $reduce(pioRead, in), + cut_zero_tail ? fltrs : &fltrs[1], + cut_zero_tail ? 2 : 1, + NULL); + + if($haserr(err)) + goto cleanup; + + if (file) { + file->crc = pioCRC32Counter_getCRC32(c); + file->read_size = pioCRC32Counter_getSize(c); + file->write_size = pioCRC32Counter_getSize(c); + } + +cleanup: + $i(pioClose, in); + $i(pioClose, out); - if (out && fclose(out)) - elog(ERROR, "Cannot close the file \"%s\": %s", to_fullpath, strerror(errno)); + // has $noerr() by default + return $iresult(err); } /* diff --git a/src/utils/file.c b/src/utils/file.c index f8aa4d843..192662c13 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2711,6 +2711,15 @@ typedef struct pioReSeekableReader { #define kls__pioReSeekableReader iface__pioReader, mth(fobjDispose) fobj_klass(pioReSeekableReader); +/* zero tail detector */ +typedef struct pioZeroTail { + int64_t read_size; + int64_t write_size; +} pioZeroTail; + +#define kls__pioZeroTail iface__pioFilter, mth(fobjDispose), iface(pioFilter) +fobj_klass(pioZeroTail); + /* CRC32 counter */ typedef struct pioDevNull { @@ -4918,6 +4927,89 @@ pioWriteFilter_fobjRepr(VSelf) (filter, self->filter.self)); } +pioZeroTail* +pioZeroTail_alloc(void) { + return $alloc(pioZeroTail, .read_size = 0, .write_size = 0); +} + +static pioFltTransformResult +pioZeroTail_pioFltTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i *err) { + Self(pioZeroTail); + pioFltTransformResult tr = {0, 
0}; + fobj_reset_err(err); + + size_t non_zero_len = find_zero_tail(rbuf.ptr, rbuf.len); + /* + * It is dirty trick to silence warnings in CFS GC process: + * backup at least cfs header size bytes. + */ + if (self->read_size + non_zero_len < PAGE_ZEROSEARCH_FINE_GRANULARITY && + self->read_size + rbuf.len > 0) + { + non_zero_len = Min(PAGE_ZEROSEARCH_FINE_GRANULARITY, + self->read_size + rbuf.len); + non_zero_len -= self->read_size; + } + + if(non_zero_len > 0) { + /* + * Calculating how many zeroes we can actually copy. + * self->read_size - self->write_size always equals the number of zero bytes. + */ + ssize_t zeroes_to_fill = ft_min(self->read_size - self->write_size, wbuf.len); + + /* Restoring the zeroes gap */ + memset(wbuf.ptr, 0, zeroes_to_fill); + ft_bytes_consume(&wbuf, zeroes_to_fill); + self->write_size += zeroes_to_fill; + tr.produced += zeroes_to_fill; + + /* + * At this moment, wbuf.len will be deacreased by zeroes_to_fill, so it + * represents the room left in wbuf. + */ + if(self->read_size == self->write_size && wbuf.len > 0) { + /* + * All zeroes are in buffer at this point so self->read_size == self->write_size + * and all we need to copy is non_zero_len bytes. This can be done in multiple + * calls and the data will not be lost since tr.consumed will be increased only + * by to_copy bytes. + */ + ft_bytes_t copybuf = ft_bytes_split(&rbuf, ft_min(wbuf.len, non_zero_len)); + size_t to_move = ft_bytes_move(&wbuf, ©buf); + + self->write_size += to_move; + tr.consumed = to_move + (to_move < non_zero_len ? 0 : rbuf.len); + self->read_size += tr.consumed; + + tr.produced += to_move; + } + + /* + * In case there are some unwritten zeroes left tr.consumed == 0 here and the next + * time this filter is called we'll read the same data chunk again. 
+ */ + } else { + // There's a zero tail, skip it for now + self->read_size += rbuf.len; + tr.consumed += rbuf.len; + } + + return tr; +} + +static size_t +pioZeroTail_pioFltFinish(VSelf, ft_bytes_t wbuf, err_i *err) { + Self(pioZeroTail); + fobj_reset_err(err); + + return 0; +} + +static void +pioZeroTail_fobjDispose(VSelf) { +} + #ifdef HAVE_LIBZ #define MAX_WBITS 15 /* 32K LZ77 window */ #define DEF_MEM_LEVEL 8 @@ -6002,6 +6094,7 @@ fobj_klass_handle(pioReadFilter, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioDevNull); fobj_klass_handle(pioCRC32Counter); fobj_klass_handle(pioReSeekableReader); +fobj_klass_handle(pioZeroTail); #ifdef HAVE_LIBZ fobj_klass_handle(pioGZCompress, mth(fobjRepr)); diff --git a/src/utils/file.h b/src/utils/file.h index 8a2fe1c70..a7e1069d4 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -20,6 +20,10 @@ #define DIR_PERMISSION (0700) #define FILE_PERMISSION (0600) +/* define it now to avoid ugly mess of includes */ +struct pgFile; +typedef struct pgFile pgFile; + typedef enum { /* message for compatibility check */ @@ -402,6 +406,7 @@ extern pioFilter_i pioGZCompressFilter(int level); extern pioFilter_i pioGZDecompressFilter(bool ignoreTruncate); extern pioWrapRead_i pioGZDecompressWrapper(bool ignoreTruncate); #endif +extern pioFilter_i pioCRC32Filter(void); typedef struct pioCRC32Counter pioCRC32Counter; #define kls__pioCRC32Counter iface__pioFilter, mth(pioFltInPlace), iface(pioFilter) @@ -410,6 +415,9 @@ extern pioCRC32Counter* pioCRC32Counter_alloc(void); extern pg_crc32 pioCRC32Counter_getCRC32(pioCRC32Counter* flt); extern int64_t pioCRC32Counter_getSize(pioCRC32Counter* flt); +typedef struct pioZeroTail pioZeroTail; +extern pioZeroTail* pioZeroTail_alloc(void); + extern pioWriteFlush_i pioDevNull_alloc(void); extern err_i pioCopyWithFilters(pioWriteFlush_i dest, pioRead_i src, From 54aabfdabb9345e550b51d99dbcb61bc45718772 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 00:52:36 +0300 Subject: [PATCH 
243/339] format previous commit a bit --- src/data.c | 5 +-- src/utils/file.c | 101 +++++++++++++++++++++++++++-------------------- src/utils/file.h | 8 +--- 3 files changed, 62 insertions(+), 52 deletions(-) diff --git a/src/data.c b/src/data.c index 356b2b0d3..53f6a887b 100644 --- a/src/data.c +++ b/src/data.c @@ -1161,9 +1161,8 @@ backup_non_data_file_internal(const char *from_fullpath, FOBJ_FUNC_ARP(); cut_zero_tail = file->forkName == cfm; - INIT_CRC32C(file->crc); - /* reset size summary */ + file->crc = 0; file->read_size = 0; file->write_size = 0; file->uncompressed_size = 0; @@ -1216,7 +1215,7 @@ send_file(const char *to_fullpath, const char *from_fullpath, bool cut_zero_tail * will be used. */ pioCRC32Counter *c = pioCRC32Counter_alloc(); - pioZeroTail *zt = pioZeroTail_alloc(); + pioCutZeroTail *zt = pioCutZeroTail_alloc(); pioFilter_i ztFlt = bind_pioFilter(zt); pioFilter_i crcFlt = bind_pioFilter(c); pioFilter_i fltrs[] = { ztFlt, crcFlt }; diff --git a/src/utils/file.c b/src/utils/file.c index 192662c13..d75fc8dbc 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2712,13 +2712,13 @@ typedef struct pioReSeekableReader { fobj_klass(pioReSeekableReader); /* zero tail detector */ -typedef struct pioZeroTail { - int64_t read_size; - int64_t write_size; -} pioZeroTail; +typedef struct pioCutZeroTail { + uint64_t read_size; + uint64_t write_size; +} pioCutZeroTail; -#define kls__pioZeroTail iface__pioFilter, mth(fobjDispose), iface(pioFilter) -fobj_klass(pioZeroTail); +#define kls__pioCutZeroTail iface__pioFilter, iface(pioFilter) +fobj_klass(pioCutZeroTail); /* CRC32 counter */ typedef struct pioDevNull @@ -4927,18 +4927,26 @@ pioWriteFilter_fobjRepr(VSelf) (filter, self->filter.self)); } -pioZeroTail* -pioZeroTail_alloc(void) { - return $alloc(pioZeroTail, .read_size = 0, .write_size = 0); +pioCutZeroTail* +pioCutZeroTail_alloc(void) +{ + return $alloc(pioCutZeroTail, .read_size = 0, .write_size = 0); } static pioFltTransformResult 
-pioZeroTail_pioFltTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i *err) { - Self(pioZeroTail); +pioCutZeroTail_pioFltTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i *err) +{ + Self(pioCutZeroTail); pioFltTransformResult tr = {0, 0}; + size_t wbuf_len = wbuf.len; + size_t rbuf_len = rbuf.len; + size_t non_zero_len; + size_t zeroes_to_fill; + size_t to_move; + ft_bytes_t copybuf; fobj_reset_err(err); - size_t non_zero_len = find_zero_tail(rbuf.ptr, rbuf.len); + non_zero_len = find_zero_tail(rbuf.ptr, rbuf.len); /* * It is dirty trick to silence warnings in CFS GC process: * backup at least cfs header size bytes. @@ -4951,65 +4959,72 @@ pioZeroTail_pioFltTransform(VSelf, ft_bytes_t rbuf, ft_bytes_t wbuf, err_i *err) non_zero_len -= self->read_size; } - if(non_zero_len > 0) { + if (non_zero_len == 0) + { + /* pretend we read all buffer */ + self->read_size += rbuf.len; + tr.consumed += rbuf.len; + return tr; + } + + if (self->read_size > self->write_size) + { /* * Calculating how many zeroes we can actually copy. * self->read_size - self->write_size always equals the number of zero bytes. */ - ssize_t zeroes_to_fill = ft_min(self->read_size - self->write_size, wbuf.len); + zeroes_to_fill = ft_min(self->read_size - self->write_size, wbuf.len); /* Restoring the zeroes gap */ memset(wbuf.ptr, 0, zeroes_to_fill); ft_bytes_consume(&wbuf, zeroes_to_fill); self->write_size += zeroes_to_fill; - tr.produced += zeroes_to_fill; /* * At this moment, wbuf.len will be deacreased by zeroes_to_fill, so it * represents the room left in wbuf. */ - if(self->read_size == self->write_size && wbuf.len > 0) { - /* - * All zeroes are in buffer at this point so self->read_size == self->write_size - * and all we need to copy is non_zero_len bytes. This can be done in multiple - * calls and the data will not be lost since tr.consumed will be increased only - * by to_copy bytes. 
- */ - ft_bytes_t copybuf = ft_bytes_split(&rbuf, ft_min(wbuf.len, non_zero_len)); - size_t to_move = ft_bytes_move(&wbuf, ©buf); - - self->write_size += to_move; - tr.consumed = to_move + (to_move < non_zero_len ? 0 : rbuf.len); - self->read_size += tr.consumed; - - tr.produced += to_move; - } + } + if (self->read_size == self->write_size && wbuf.len > 0) { /* - * In case there are some unwritten zeroes left tr.consumed == 0 here and the next - * time this filter is called we'll read the same data chunk again. + * All zeroes are in buffer at this point so self->read_size == self->write_size + * and all we need to copy is non_zero_len bytes. This can be done in multiple + * calls and the data will not be lost since tr.consumed will be increased only + * by to_copy bytes. */ - } else { - // There's a zero tail, skip it for now + copybuf = ft_bytes_split(&rbuf, ft_min(wbuf.len, non_zero_len)); + to_move = ft_bytes_move(&wbuf, ©buf); + + self->write_size += to_move; + self->read_size += to_move; + non_zero_len -= to_move; + } + + /* + * If we've wrote all non_zero_len bytes, we can safely pretend we read + * all buffer. 
+ */ + if (non_zero_len == 0) + { self->read_size += rbuf.len; - tr.consumed += rbuf.len; + ft_bytes_consume(&rbuf, rbuf.len); } + tr.consumed = rbuf_len - rbuf.len; + tr.produced = wbuf_len - wbuf.len; return tr; } static size_t -pioZeroTail_pioFltFinish(VSelf, ft_bytes_t wbuf, err_i *err) { - Self(pioZeroTail); +pioCutZeroTail_pioFltFinish(VSelf, ft_bytes_t wbuf, err_i *err) +{ + Self(pioCutZeroTail); fobj_reset_err(err); return 0; } -static void -pioZeroTail_fobjDispose(VSelf) { -} - #ifdef HAVE_LIBZ #define MAX_WBITS 15 /* 32K LZ77 window */ #define DEF_MEM_LEVEL 8 @@ -6094,7 +6109,7 @@ fobj_klass_handle(pioReadFilter, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioDevNull); fobj_klass_handle(pioCRC32Counter); fobj_klass_handle(pioReSeekableReader); -fobj_klass_handle(pioZeroTail); +fobj_klass_handle(pioCutZeroTail); #ifdef HAVE_LIBZ fobj_klass_handle(pioGZCompress, mth(fobjRepr)); diff --git a/src/utils/file.h b/src/utils/file.h index a7e1069d4..a34fa3d26 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -20,10 +20,6 @@ #define DIR_PERMISSION (0700) #define FILE_PERMISSION (0600) -/* define it now to avoid ugly mess of includes */ -struct pgFile; -typedef struct pgFile pgFile; - typedef enum { /* message for compatibility check */ @@ -415,8 +411,8 @@ extern pioCRC32Counter* pioCRC32Counter_alloc(void); extern pg_crc32 pioCRC32Counter_getCRC32(pioCRC32Counter* flt); extern int64_t pioCRC32Counter_getSize(pioCRC32Counter* flt); -typedef struct pioZeroTail pioZeroTail; -extern pioZeroTail* pioZeroTail_alloc(void); +typedef struct pioCutZeroTail pioCutZeroTail; +extern pioCutZeroTail* pioCutZeroTail_alloc(void); extern pioWriteFlush_i pioDevNull_alloc(void); From 7d48f6f620af947bd6ba84ced05e804dc3baea61 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 01:18:47 +0300 Subject: [PATCH 244/339] fix test_page_doesnt_store_unchanged_cfm --- src/data.c | 6 ++++-- src/utils/file.c | 12 ++++++++++++ src/utils/file.h | 2 ++ 3 files changed, 18 
insertions(+), 2 deletions(-) diff --git a/src/data.c b/src/data.c index 53f6a887b..7a4eec336 100644 --- a/src/data.c +++ b/src/data.c @@ -1183,8 +1183,6 @@ backup_non_data_file_internal(const char *from_fullpath, elog(ERROR, "An error occured while copying %s: %s", from_fullpath, $errmsg(err)); } - - file->uncompressed_size = file->read_size; } static err_i @@ -1232,6 +1230,10 @@ send_file(const char *to_fullpath, const char *from_fullpath, bool cut_zero_tail file->crc = pioCRC32Counter_getCRC32(c); file->read_size = pioCRC32Counter_getSize(c); file->write_size = pioCRC32Counter_getSize(c); + if (cut_zero_tail) + file->uncompressed_size = pioCutZeroTail_getReadSize(zt); + else + file->uncompressed_size = file->read_size; } cleanup: diff --git a/src/utils/file.c b/src/utils/file.c index d75fc8dbc..9dab9dd41 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5025,6 +5025,18 @@ pioCutZeroTail_pioFltFinish(VSelf, ft_bytes_t wbuf, err_i *err) return 0; } +int64_t +pioCutZeroTail_getReadSize(pioCutZeroTail* flt) +{ + return (int64_t)flt->read_size; +} + +int64_t +pioCutZeroTail_getWriteSize(pioCutZeroTail* flt) +{ + return (int64_t)flt->write_size; +} + #ifdef HAVE_LIBZ #define MAX_WBITS 15 /* 32K LZ77 window */ #define DEF_MEM_LEVEL 8 diff --git a/src/utils/file.h b/src/utils/file.h index a34fa3d26..cb4248e84 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -413,6 +413,8 @@ extern int64_t pioCRC32Counter_getSize(pioCRC32Counter* flt); typedef struct pioCutZeroTail pioCutZeroTail; extern pioCutZeroTail* pioCutZeroTail_alloc(void); +extern int64_t pioCutZeroTail_getReadSize(pioCutZeroTail* flt); +extern int64_t pioCutZeroTail_getWriteSize(pioCutZeroTail* flt); extern pioWriteFlush_i pioDevNull_alloc(void); From 557df03504847d94a363f2f548a6778a7f70acd3 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 01:31:13 +0300 Subject: [PATCH 245/339] remove some no more used staff --- src/pg_probackup.h | 5 - src/utils/file.c | 338 
--------------------------------------------- 2 files changed, 343 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 9eaab7986..f034e4eee 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1142,11 +1142,6 @@ extern parray * pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack extern FILE* open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 buf_size); /* FIO */ -extern int fio_send_file(const char *from_fullpath, FILE* out, bool cut_zero_tail, - pgFile *file, char **errormsg); -extern int fio_send_file_local(const char *from_fullpath, FILE* out, bool cut_zero_tail, - pgFile *file, char **errormsg); - extern bool pgut_rmtree(const char *path, bool rmtopdir, bool strict); extern void pgut_setenv(const char *key, const char *val); diff --git a/src/utils/file.c b/src/utils/file.c index 9dab9dd41..840098dab 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1013,279 +1013,6 @@ fio_send_file_crc(send_file_state* st, char *buf, size_t len) COMP_CRC32C(st->crc, buf, len); } -static bool -fio_send_file_write(FILE* out, send_file_state* st, char *buf, size_t len) -{ - if (len == 0) - return true; - - if (st->read_size > st->write_size && - fseeko(out, st->read_size, SEEK_SET) != 0) - { - return false; - } - - if (fwrite(buf, 1, len, out) != len) - { - return false; - } - - st->read_size += len; - st->write_size = st->read_size; - - return true; -} - -/* Receive chunks of data and write them to destination file. - * Return codes: - * SEND_OK (0) - * FILE_MISSING (-1) - * OPEN_FAILED (-2) - * READ_FAILED (-3) - * WRITE_FAILED (-4) - * - * OPEN_FAILED and READ_FAIL should also set errormsg. - * If pgFile is not NULL then we must calculate crc and read_size for it. 
- */ -int -fio_send_file(const char *from_fullpath, FILE* out, bool cut_zero_tail, - pgFile *file, char **errormsg) -{ - fio_header hdr; - int exit_code = SEND_OK; - size_t path_len = strlen(from_fullpath) + 1; - char *buf = pgut_malloc(CHUNK_SIZE); /* buffer */ - send_file_state st = {false, 0, 0, 0}; - - memset(&hdr, 0, sizeof(hdr)); - - if (file) - { - st.calc_crc = true; - st.crc = file->crc; - } - - hdr.cop = FIO_SEND_FILE; - hdr.size = path_len; - -// elog(VERBOSE, "Thread [%d]: Attempting to open remote WAL file '%s'", -// thread_num, from_fullpath); - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, from_fullpath, path_len), path_len); - - for (;;) - { - /* receive data */ - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (hdr.cop == FIO_SEND_FILE_EOF) - { - if (st.write_size < st.read_size) - { - if (!cut_zero_tail) - { - /* - * We still need to calc crc for zero tail. - */ - fio_send_file_crc(&st, NULL, 0); - - /* - * Let's write single zero byte to the end of file to restore - * logical size. - * Well, it would be better to use ftruncate here actually, - * but then we need to change interface. 
- */ - st.read_size -= 1; - buf[0] = 0; - if (!fio_send_file_write(out, &st, buf, 1)) - { - exit_code = WRITE_FAILED; - break; - } - } - } - - if (file) - { - file->crc = st.crc; - file->read_size = st.read_size; - file->write_size = st.write_size; - } - break; - } - else if (hdr.cop == FIO_ERROR) - { - /* handle error, reported by the agent */ - if (hdr.size > 0) - { - IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - *errormsg = pgut_malloc(hdr.size); - snprintf(*errormsg, hdr.size, "%s", buf); - } - exit_code = hdr.arg; - break; - } - else if (hdr.cop == FIO_PAGE) - { - Assert(hdr.size <= CHUNK_SIZE); - IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); - - /* We have received a chunk of data data, lets write it out */ - fio_send_file_crc(&st, buf, hdr.size); - if (!fio_send_file_write(out, &st, buf, hdr.size)) - { - exit_code = WRITE_FAILED; - break; - } - } - else if (hdr.cop == FIO_PAGE_ZERO) - { - Assert(hdr.size == 0); - Assert(hdr.arg <= CHUNK_SIZE); - - /* - * We have received a chunk of zero data, lets just think we - * wrote it. 
- */ - st.read_size += hdr.arg; - } - else - { - /* TODO: fio_disconnect may get assert fail when running after this */ - elog(ERROR, "Remote agent returned message of unexpected type: %i", hdr.cop); - } - } - - if (exit_code < OPEN_FAILED) - fio_disconnect(); /* discard possible pending data in pipe */ - - pg_free(buf); - return exit_code; -} - -int -fio_send_file_local(const char *from_fullpath, FILE* out, bool cut_zero_tail, - pgFile *file, char **errormsg) -{ - FILE* in; - char* buf; - size_t read_len, non_zero_len; - int exit_code = SEND_OK; - send_file_state st = {false, 0, 0, 0}; - - if (file) - { - st.calc_crc = true; - st.crc = file->crc; - } - - /* open source file for read */ - in = fopen(from_fullpath, PG_BINARY_R); - if (in == NULL) - { - /* maybe deleted, it's not error in case of backup */ - if (errno == ENOENT) - return FILE_MISSING; - - - *errormsg = psprintf("Cannot open file \"%s\": %s", from_fullpath, - strerror(errno)); - return OPEN_FAILED; - } - - /* disable stdio buffering for local input/output files to avoid triple buffering */ - setvbuf(in, NULL, _IONBF, BUFSIZ); - setvbuf(out, NULL, _IONBF, BUFSIZ); - - /* allocate 64kB buffer */ - buf = pgut_malloc(CHUNK_SIZE); - - /* copy content and calc CRC */ - for (;;) - { - read_len = fread(buf, 1, CHUNK_SIZE, in); - - if (ferror(in)) - { - *errormsg = psprintf("Cannot read from file \"%s\": %s", - from_fullpath, strerror(errno)); - exit_code = READ_FAILED; - goto cleanup; - } - - if (read_len > 0) - { - non_zero_len = find_zero_tail(buf, read_len); - /* - * It is dirty trick to silence warnings in CFS GC process: - * backup at least cfs header size bytes. 
- */ - if (st.read_size + non_zero_len < PAGE_ZEROSEARCH_FINE_GRANULARITY && - st.read_size + read_len > 0) - { - non_zero_len = Min(PAGE_ZEROSEARCH_FINE_GRANULARITY, - st.read_size + read_len); - non_zero_len -= st.read_size; - } - if (non_zero_len > 0) - { - fio_send_file_crc(&st, buf, non_zero_len); - if (!fio_send_file_write(out, &st, buf, non_zero_len)) - { - exit_code = WRITE_FAILED; - goto cleanup; - } - } - if (non_zero_len < read_len) - { - /* Just pretend we wrote it. */ - st.read_size += read_len - non_zero_len; - } - } - - if (feof(in)) - break; - } - - if (st.write_size < st.read_size) - { - if (!cut_zero_tail) - { - /* - * We still need to calc crc for zero tail. - */ - fio_send_file_crc(&st, NULL, 0); - - /* - * Let's write single zero byte to the end of file to restore - * logical size. - * Well, it would be better to use ftruncate here actually, - * but then we need to change interface. - */ - st.read_size -= 1; - buf[0] = 0; - if (!fio_send_file_write(out, &st, buf, 1)) - { - exit_code = WRITE_FAILED; - goto cleanup; - } - } - } - - if (file) - { - file->crc = st.crc; - file->read_size = st.read_size; - file->write_size = st.write_size; - } - - cleanup: - free(buf); - fclose(in); - return exit_code; -} - /* Send open file content * On error we return FIO_ERROR message with following codes * FIO_ERROR: @@ -1379,68 +1106,6 @@ fio_send_file_content_impl(int fd, int out, const char* path) return true; } -/* Send file content - * On error we return FIO_ERROR message with following codes - * FIO_ERROR: - * FILE_MISSING (-1) - * OPEN_FAILED (-2) - * READ_FAILED (-3) - * - * FIO_PAGE - * FIO_SEND_FILE_EOF - * - */ -static void -fio_send_file_impl(int out, const char* path) -{ - int fd; - int save_errno; - fio_header hdr; - char *errormsg = NULL; - - /* open source file for read */ - /* TODO: check that file is regular file */ - fd = open(path, O_RDONLY | PG_BINARY); - if (fd < 0) - { - hdr.cop = FIO_ERROR; - - /* do not send exact wording of ENOENT error 
message - * because it is a very common error in our case, so - * error code is enough. - */ - if (errno == ENOENT) - { - hdr.arg = FILE_MISSING; - hdr.size = 0; - } - else - { - save_errno = errno; - hdr.arg = OPEN_FAILED; - errormsg = pgut_malloc(ERRMSG_MAX_LEN); - /* Construct the error message */ - snprintf(errormsg, ERRMSG_MAX_LEN, "Cannot open file '%s': %s", - path, strerror(save_errno)); - hdr.size = strlen(errormsg) + 1; - } - - /* send header and message */ - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - if (errormsg) - { - IO_CHECK(fio_write_all(out, errormsg, hdr.size), hdr.size); - free(errormsg); - } - - return; - } - - fio_send_file_content_impl(fd, out, path); - - close(fd); -} - /* * Read the local file to compute its CRC. * We cannot make decision about file decompression because @@ -2106,9 +1771,6 @@ fio_communicate(int in, int out) case FIO_REMOVE_DIR: fio_remove_dir_impl(out, buf); break; - case FIO_SEND_FILE: - fio_send_file_impl(out, buf); - break; case FIO_SEND_FILE_CONTENT: fio_send_file_content_impl(fd[hdr.handle], out, buf); break; From 89b28fe885c0df5a3b4102e804f3388351f56ed6 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 01:35:01 +0300 Subject: [PATCH 246/339] remove unused trick in fio_send_file_content_impl --- src/utils/file.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 840098dab..f5b52cebe 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1066,17 +1066,6 @@ fio_send_file_content_impl(int fd, int out, const char* path) /* send chunk */ non_zero_len = find_zero_tail(buf, read_len); - /* - * It is dirty trick to silence warnings in CFS GC process: - * backup at least cfs header size bytes. 
- */ - if (read_size + non_zero_len < PAGE_ZEROSEARCH_FINE_GRANULARITY && - read_size + read_len > 0) - { - non_zero_len = Min(PAGE_ZEROSEARCH_FINE_GRANULARITY, - read_size + read_len); - non_zero_len -= read_size; - } if (non_zero_len > 0) { From 4cf6270f99230dc6981a2b39a4f00d1ea96edae9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 02:04:33 +0300 Subject: [PATCH 247/339] refactor pgFileNew: now it calculates crc by itself it will reduce calls to pgFileGetCRC32C as well --- src/backup.c | 10 +--------- src/dir.c | 29 +++++++++++++++++------------ src/pg_probackup.h | 3 +-- src/stream.c | 34 +++++++++++----------------------- 4 files changed, 30 insertions(+), 46 deletions(-) diff --git a/src/backup.c b/src/backup.c index c1f5d8065..f6e768f67 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1816,15 +1816,7 @@ pg_stop_backup_write_file_helper(pioDrive_i drive, const char *path, const char */ if (file_list) { - file = pgFileNew(full_filename, filename, true, 0, drive); - - if (file->kind == PIO_KIND_REGULAR) - { - file->crc = pgFileGetCRC32C(full_filename, false); - - file->write_size = file->size; - file->uncompressed_size = file->size; - } + file = pgFileNew(full_filename, filename, true, true, drive); parray_append(file_list, file); } } diff --git a/src/dir.c b/src/dir.c index bfc043118..e628e4d45 100644 --- a/src/dir.c +++ b/src/dir.c @@ -143,7 +143,7 @@ pgFileSetStat(pgFile* file, pio_stat_t st) pgFile * pgFileNew(const char *path, const char *rel_path, bool follow_symlink, - int external_dir_num, pioDrive_i drive) + bool crc, pioDrive_i drive) { FOBJ_FUNC_ARP(); pio_stat_t st; @@ -154,15 +154,25 @@ pgFileNew(const char *path, const char *rel_path, bool follow_symlink, st = $i(pioStat, drive, .path = path, .follow_symlink = follow_symlink, .err = &err); if ($haserr(err)) { - /* file not found is not an error case */ - if (getErrno(err) == ENOENT) - return NULL; ft_logerr(FT_FATAL, $errmsg(err), "pgFileNew"); } file = pgFileInit(rel_path); 
pgFileSetStat(file, st); - file->external_dir_num = external_dir_num; + if (file->kind == PIO_KIND_REGULAR) + { + file->write_size = file->size; + file->uncompressed_size = file->size; + } + if (file->kind == PIO_KIND_REGULAR && crc) + { + file->crc = $i(pioGetCRC32, drive, .path = path, + .compressed = false, .err = &err); + if ($haserr(err)) { + pgFileFree(file); + ft_logerr(FT_FATAL, $errmsg(err), "pgFileNew"); + } + } return file; } @@ -173,8 +183,7 @@ pgFileInit(const char *rel_path) pgFile *file; char *file_name = NULL; - file = (pgFile *) pgut_malloc(sizeof(pgFile)); - MemSet(file, 0, sizeof(pgFile)); + file = (pgFile *) pgut_malloc0(sizeof(pgFile)); file->rel_path = pgut_strdup(rel_path); canonicalize_path(file->rel_path); @@ -1525,11 +1534,7 @@ write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_ ft_strbuf_free(&buf); /* Add metadata to backup_content.control */ - file = pgFileNew(database_map_path, DATABASE_MAP, true, 0, drive); - file->crc = pgFileGetCRC32C(database_map_path, false); - file->write_size = file->size; - file->uncompressed_size = file->size; - + file = pgFileNew(database_map_path, DATABASE_MAP, true, true, drive); parray_append(backup_files_list, file); } diff --git a/src/pg_probackup.h b/src/pg_probackup.h index f034e4eee..72458d251 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1004,8 +1004,7 @@ extern bool dir_is_empty(const char *path, fio_location location); extern bool fileExists(const char *path, fio_location location); extern pgFile *pgFileNew(const char *path, const char *rel_path, - bool follow_symlink, int external_dir_num, - pioDrive_i drive); + bool follow_symlink, bool crc, pioDrive_i drive); extern pgFile *pgFileInit(const char *rel_path); extern void pgFileFree(void *file); diff --git a/src/stream.c b/src/stream.c index 027d70c05..1b97a64d1 100644 --- a/src/stream.c +++ b/src/stream.c @@ -659,7 +659,10 @@ add_walsegment_to_filelist(parray *filelist, uint32 timeline, XLogRecPtr 
xlogpos join_path_components(wal_segment_fullpath, basedir, wal_segment_name); join_path_components(wal_segment_relpath, PG_XLOG_DIR, wal_segment_name); - file = pgFileNew(wal_segment_fullpath, wal_segment_relpath, false, 0, drive); + file = pgFileNew(wal_segment_fullpath, wal_segment_relpath, false, do_crc, drive); + ft_assert(file->size == xlog_seg_size, + "Wal segment file '%s' is of unexpected size %lld", + wal_segment_fullpath, (long long)file->size); /* * Check if file is already in the list @@ -671,22 +674,15 @@ add_walsegment_to_filelist(parray *filelist, uint32 timeline, XLogRecPtr xlogpos if (existing_file) { - if (do_crc) - (*existing_file)->crc = pgFileGetCRC32C(wal_segment_fullpath, false); - (*existing_file)->write_size = xlog_seg_size; - (*existing_file)->uncompressed_size = xlog_seg_size; + (*existing_file)->crc = file->crc; + (*existing_file)->size = file->size; + (*existing_file)->write_size = file->write_size; + (*existing_file)->uncompressed_size = file->uncompressed_size; + + pgFileFree(file); return; } - - if (do_crc) - file->crc = pgFileGetCRC32C(wal_segment_fullpath, false); - - /* Should we recheck it using stat? 
*/ - file->write_size = xlog_seg_size; - file->uncompressed_size = xlog_seg_size; - - /* append file to filelist */ parray_append(filelist, file); } @@ -708,14 +704,6 @@ add_history_file_to_filelist(parray *filelist, uint32 timeline, char *basedir) join_path_components(fullpath, basedir, filename); join_path_components(relpath, PG_XLOG_DIR, filename); - file = pgFileNew(fullpath, relpath, false, 0, drive); - - /* calculate crc */ - if (do_crc) - file->crc = pgFileGetCRC32C(fullpath, false); - file->write_size = file->size; - file->uncompressed_size = file->size; - - /* append file to filelist */ + file = pgFileNew(fullpath, relpath, false, do_crc, drive); parray_append(filelist, file); } From 78360660534c9fb7c969668658f140c8fc02feed Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 02:29:10 +0300 Subject: [PATCH 248/339] validate.c: remove direct usage of pgFileGetCRC32C --- src/utils/file.h | 2 +- src/validate.c | 47 +++++++++++++++++++++++++++++++++++++---------- 2 files changed, 38 insertions(+), 11 deletions(-) diff --git a/src/utils/file.h b/src/utils/file.h index cb4248e84..cd890046d 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -280,7 +280,7 @@ fobj_iface(pioPagesIterator); #define mth__pioRename err_i, (path_t, old_path), (path_t, new_path) #define mth__pioExists bool, (path_t, path), (pio_file_kind_e, expected_kind, PIO_KIND_REGULAR), \ (err_i *, err) -#define mth__pioGetCRC32 pg_crc32, (path_t, path), (bool, compressed), \ +#define mth__pioGetCRC32 pg_crc32, (path_t, path), (bool, compressed, false), \ (err_i *, err) /* Compare, that filename1 and filename2 is the same file */ #define mth__pioFilesAreSame bool, (path_t, file1), (path_t, file2) diff --git a/src/validate.c b/src/validate.c index d7399826f..428982763 100644 --- a/src/validate.c +++ b/src/validate.c @@ -22,6 +22,7 @@ static bool skipped_due_to_lock = false; typedef struct { + pioDrive_i drive; const char *base_path; parray *files; bool corrupted; @@ -143,6 +144,7 @@ 
pgBackupValidate(pgBackup *backup, pgRestoreParams *params) { validate_files_arg *arg = &(threads_args[i]); + arg->drive = backup->backup_location; arg->base_path = backup->database_dir; arg->files = files; arg->corrupted = false; @@ -230,14 +232,18 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) static void * pgBackupValidateFiles(void *arg) { + FOBJ_FUNC_ARP(); int i; validate_files_arg *arguments = (validate_files_arg *)arg; int num_files = parray_num(arguments->files); pg_crc32 crc; + pioDrive_i drive = arguments->drive; + err_i err; for (i = 0; i < num_files; i++) { - struct stat st; + FOBJ_LOOP_ARP(); + pio_stat_t st; pgFile *file = (pgFile *) parray_get(arguments->files, i); char file_fullpath[MAXPGPATH]; @@ -302,9 +308,11 @@ pgBackupValidateFiles(void *arg) join_path_components(file_fullpath, arguments->base_path, file->rel_path); /* TODO: it is redundant to check file existence using stat */ - if (stat(file_fullpath, &st) == -1) + st = $i(pioStat, drive, .path = file_fullpath, .follow_symlink = false, + .err = &err); + if ($haserr(err)) { - if (errno == ENOENT) + if (getErrno(err) == ENOENT) elog(WARNING, "Backup file \"%s\" is not found", file_fullpath); else elog(WARNING, "Cannot stat backup file \"%s\": %s", @@ -313,10 +321,10 @@ pgBackupValidateFiles(void *arg) break; } - if (file->write_size != st.st_size) + if (file->write_size != st.pst_size) { elog(WARNING, "Invalid size of backup file \"%s\" : %lld. 
Expected %lld", - file_fullpath, (long long) st.st_size, (long long)file->write_size); + file_fullpath, (long long) st.pst_size, (long long)file->write_size); arguments->corrupted = true; break; } @@ -349,15 +357,25 @@ pgBackupValidateFiles(void *arg) else #if PG_VERSION_NUM >= 120000 { - Assert(arguments->backup_version >= 20025); - crc = pgFileGetCRC32C(file_fullpath, false); + ft_assert(arguments->backup_version >= 20025); + crc = $i(pioGetCRC32, drive, .path = file_fullpath, + .err = &err); } #else /* PG_VERSION_NUM < 120000 */ if (arguments->backup_version <= 20021 || arguments->backup_version >= 20025) - crc = pgFileGetCRC32C(file_fullpath, false); + crc = $i(pioGetCRC32, drive, .path = file_fullpath, .err = &err); else + { + ft_assert(!$i(pioIsRemote, drive)); crc = pgFileGetCRC32(file_fullpath, false); + } #endif /* PG_VERSION_NUM < 120000 */ + if ($haserr(err)) + { + ft_logerr(FT_WARNING, $errmsg(err), "Backup file CRC"); + arguments->corrupted = true; + break; + } if (crc != file->crc) { @@ -719,11 +737,13 @@ do_validate_instance(InstanceState *instanceState) bool validate_tablespace_map(pgBackup *backup, bool no_validate) { + FOBJ_FUNC_ARP(); char map_path[MAXPGPATH]; pgFile *dummy = NULL; pgFile **tablespace_map = NULL; pg_crc32 crc; parray *files = get_backup_filelist(backup, true); + err_i err; parray_qsort(files, pgFileCompareRelPathWithExternal); join_path_components(map_path, backup->database_dir, PG_TABLESPACE_MAP_FILE); @@ -750,14 +770,21 @@ validate_tablespace_map(pgBackup *backup, bool no_validate) { #if PG_VERSION_NUM >= 120000 Assert(parse_program_version(backup->program_version) >= 20025); - crc = pgFileGetCRC32C(map_path, false); + crc = $i(pioGetCRC32, backup->backup_location, .path = map_path, + .err = &err); #else /* PG_VERSION_NUM < 120000 */ if (parse_program_version(backup->program_version) <= 20021 || parse_program_version(backup->program_version) >= 20025) - crc = pgFileGetCRC32C(map_path, false); + crc = $i(pioGetCRC32, 
backup->backup_location, .path = map_path, + .err = &err); else + { + ft_assert(!$i(pioIsRemote, drive)); crc = pgFileGetCRC32(map_path, false); + } #endif /* PG_VERSION_NUM < 120000 */ + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Tablespace map file CRC"); if ((*tablespace_map)->crc != crc) elog(ERROR, "Invalid CRC of tablespace map file \"%s\" : %X. Expected %X, " From 9673496b47ef8a06ca2c045502c76977d5be1dba Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 02:59:01 +0300 Subject: [PATCH 249/339] make pioGetCRC32 to use pioCRC32Counter --- src/utils/file.c | 73 ++++++++++++++++++++++++++++++++++++++++-------- src/utils/file.h | 3 +- 2 files changed, 63 insertions(+), 13 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index f5b52cebe..48fbd3f6e 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1791,6 +1791,20 @@ fio_communicate(int in, int out) crc = pgFileGetCRC32C(buf, (hdr.arg & GET_CRC32_MISSING_OK) != 0); IO_CHECK(fio_write_all(out, &crc, sizeof(crc)), sizeof(crc)); break; + case PIO_GET_CRC32: + crc = $i(pioGetCRC32, drive, .path = buf, + .compressed = (hdr.arg & GET_CRC32_DECOMPRESS) != 0, + .truncated = (hdr.arg & GET_CRC32_TRUNCATED) != 0, + .err = &err); + if ($haserr(err)) + fio_send_pio_err(out, err); + else + { + hdr.size = 0; + hdr.arg = crc; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + } + break; case FIO_GET_CHECKSUM_MAP: fio_get_checksum_map_impl(buf, out); break; @@ -2659,15 +2673,41 @@ pioLocalDrive_pioRename(VSelf, path_t old_path, path_t new_path) } static pg_crc32 -pioLocalDrive_pioGetCRC32(VSelf, path_t path, bool compressed, err_i *err) +pioLocalDrive_pioGetCRC32(VSelf, path_t path, + bool compressed, bool truncated, + err_i *err) { + FOBJ_FUNC_ARP(); + Self(pioLocalDrive); fobj_reset_err(err); - elog(VERBOSE, "Local Drive calculate crc32 for '%s', compressed=%d", - path, compressed); - if (compressed) - return pgFileGetCRC32Cgz(path, false); - else - return 
pgFileGetCRC32C(path, false); + pioReadStream_i file; + pioRead_i read; + pioCRC32Counter* crc; + + elog(VERBOSE, "Local Drive calculate crc32 for '%s', compressed=%d, truncated=%d", + path, compressed, truncated); + + file = $(pioOpenReadStream, self, .path = path, .err = err); + if ($haserr(*err)) + { + $iresult(*err); + return 0; + } + + read = $reduce(pioRead, file); + if (compressed) + read = pioWrapReadFilter(read, pioGZDecompressFilter(false), + CHUNK_SIZE); + if (truncated) + read = pioWrapReadFilter(read, $bind(pioFilter, pioCutZeroTail_alloc()), + CHUNK_SIZE); + crc = pioCRC32Counter_alloc(); + read = pioWrapReadFilter(read, $bind(pioFilter, crc), CHUNK_SIZE); + *err = pioCopy(pioDevNull_alloc(), read); + $iresult(*err); + $i(pioClose, file); // ignore error + + return pioCRC32Counter_getCRC32(crc); } static bool @@ -3443,29 +3483,38 @@ pioRemoteDrive_pioRename(VSelf, path_t old_path, path_t new_path) } static pg_crc32 -pioRemoteDrive_pioGetCRC32(VSelf, path_t path, bool compressed, err_i *err) +pioRemoteDrive_pioGetCRC32(VSelf, path_t path, + bool compressed, bool truncated, + err_i *err) { fio_header hdr; size_t path_len = strlen(path) + 1; - pg_crc32 crc = 0; fobj_reset_err(err); fio_ensure_remote(); - hdr.cop = FIO_GET_CRC32; + hdr.cop = PIO_GET_CRC32; hdr.handle = -1; hdr.size = path_len; hdr.arg = 0; if (compressed) hdr.arg = GET_CRC32_DECOMPRESS; + if (truncated) + hdr.arg |= GET_CRC32_TRUNCATED; elog(VERBOSE, "Remote Drive calculate crc32 for '%s', hdr.arg=%d", path, compressed); IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, path, path_len), path_len); - IO_CHECK(fio_read_all(fio_stdin, &crc, sizeof(crc)), sizeof(crc)); + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + + if (hdr.cop == FIO_PIO_ERROR) + { + *err = fio_receive_pio_err(&hdr); + return 0; + } - return crc; + return hdr.arg; } static bool diff --git a/src/utils/file.h b/src/utils/file.h index 
cd890046d..9dc55f8ca 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -60,6 +60,7 @@ typedef enum FIO_ITERATE_PAGES, FIO_ITERATE_DATA, FIO_ITERATE_EOF, + PIO_GET_CRC32, PIO_OPEN_REWRITE, PIO_OPEN_WRITE, PIO_WRITE_ASYNC, @@ -281,7 +282,7 @@ fobj_iface(pioPagesIterator); #define mth__pioExists bool, (path_t, path), (pio_file_kind_e, expected_kind, PIO_KIND_REGULAR), \ (err_i *, err) #define mth__pioGetCRC32 pg_crc32, (path_t, path), (bool, compressed, false), \ - (err_i *, err) + (bool, truncated, false), (err_i *, err) /* Compare, that filename1 and filename2 is the same file */ #define mth__pioFilesAreSame bool, (path_t, file1), (path_t, file2) #define mth__pioIsRemote bool From 5898a66c3985e5ecda18b97f093c3696ae21ba4a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 04:47:16 +0300 Subject: [PATCH 250/339] huge fio_location->pioDrive_i refactor (still partial) I've started with attempt to remove fio_get_crc32 from backup_non_data_file. It led to huge replacement from fio_location to pioDrive_i. I didn't want it :-( But it should be done nevertheless. 
--- src/archive.c | 2 +- src/backup.c | 15 ++++++--- src/catchup.c | 40 +++++++++++++---------- src/checkdb.c | 4 +-- src/data.c | 45 +++++++++++++++++--------- src/init.c | 11 ++++--- src/merge.c | 4 ++- src/pg_probackup.c | 8 ++--- src/pg_probackup.h | 30 ++++++++---------- src/restore.c | 19 +++++++---- src/util.c | 79 ++++++++++++++++++++++++---------------------- src/validate.c | 12 +++---- 12 files changed, 153 insertions(+), 116 deletions(-) diff --git a/src/archive.c b/src/archive.c index 16f292194..f958c1903 100644 --- a/src/archive.c +++ b/src/archive.c @@ -643,7 +643,7 @@ do_archive_get(InstanceState *instanceState, InstanceConfig *instance, const cha num_threads = n_actual_threads; elog(VERBOSE, "Obtaining XLOG_SEG_SIZE from pg_control file"); - instance->xlog_seg_size = get_xlog_seg_size(current_dir); + instance->xlog_seg_size = get_xlog_seg_size(instanceState->database_location, current_dir); /* Prefetch optimization kicks in only if simple XLOG segments is requested * and batching is enabled. 
diff --git a/src/backup.c b/src/backup.c index f6e768f67..ebeeed9a0 100644 --- a/src/backup.c +++ b/src/backup.c @@ -441,6 +441,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, backup_files_arg *arg = &(threads_args[i]); arg->nodeInfo = nodeInfo; + arg->instanceState = instanceState; arg->from_root = instance_config.pgdata; arg->to_root = current.database_dir; arg->external_prefix = external_prefix; @@ -517,7 +518,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, elog(ERROR, "Failed to find file \"%s\" in backup filelist.", XLOG_CONTROL_FILE); - set_min_recovery_point(pg_control, current.database_dir, current.stop_lsn); + set_min_recovery_point(instanceState->database_location, instanceState->backup_location, + pg_control, current.database_dir, current.stop_lsn); } /* close and sync page header map */ @@ -804,7 +806,7 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, * instance we opened connection to. And that target backup database PGDATA * belogns to the same instance. */ - check_system_identifiers(backup_conn, instance_config.pgdata); + check_system_identifiers(instanceState->database_location, backup_conn, instance_config.pgdata); /* below perform checks specific for backup command */ if (!RetrieveWalSegSize(backup_conn)) @@ -974,12 +976,12 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) * All system identifiers must be equal. 
*/ void -check_system_identifiers(PGconn *conn, const char *pgdata) +check_system_identifiers(pioDrive_i drive, PGconn *conn, const char *pgdata) { uint64 system_id_conn; uint64 system_id_pgdata; - system_id_pgdata = get_system_identifier(FIO_DB_HOST, pgdata, false); + system_id_pgdata = get_system_identifier(drive, pgdata, false); system_id_conn = get_remote_system_identifier(conn); /* for checkdb check only system_id_pgdata and system_id_conn */ @@ -1951,6 +1953,8 @@ backup_files(void *arg) backup_files_arg *arguments = (backup_files_arg *) arg; int n_backup_files_list = parray_num(arguments->files_list); + pioDrive_i db_drive = arguments->instanceState->database_location; + pioDrive_i backup_drive = arguments->instanceState->backup_location; prev_time = current.start_time; @@ -2047,7 +2051,8 @@ backup_files(void *arg) } else { - backup_non_data_file(file, prev_file, from_fullpath, to_fullpath, + backup_non_data_file(db_drive, backup_drive, + file, prev_file, from_fullpath, to_fullpath, current.backup_mode, current.parent_backup, true); } diff --git a/src/catchup.c b/src/catchup.c index 087b74fc6..1c62027a9 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -26,8 +26,9 @@ /* * Catchup routines */ -static PGconn *catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata); -static void catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, const char *source_pgdata, +static PGconn *catchup_init_state(pioDrive_i src_drive, PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata); +static void catchup_preflight_checks(pioDrive_i source_drive, pioDrive_i dest_drive, + PGNodeInfo *source_node_info, PGconn *source_conn, const char *source_pgdata, const char *dest_pgdata); static void catchup_check_tablespaces_existance_in_tbsmapping(PGconn *conn); static parray* catchup_get_tli_history(ConnectionOptions *conn_opt, TimeLineID tli); @@ -38,7 +39,7 @@ static parray* 
catchup_get_tli_history(ConnectionOptions *conn_opt, TimeLineID t * Prepare for work: fill some globals, open connection to source database */ static PGconn * -catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata) +catchup_init_state(pioDrive_i src_drive, PGNodeInfo *source_node_info, const char *source_pgdata, const char *dest_pgdata) { PGconn *source_conn; @@ -46,8 +47,8 @@ catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, cons pgNodeInit(source_node_info); /* Get WAL segments size and system ID of source PG instance */ - instance_config.xlog_seg_size = get_xlog_seg_size(source_pgdata); - instance_config.system_identifier = get_system_identifier(FIO_DB_HOST, source_pgdata, false); + instance_config.xlog_seg_size = get_xlog_seg_size(src_drive, source_pgdata); + instance_config.system_identifier = get_system_identifier(src_drive, source_pgdata, false); current.start_time = time(NULL); strlcpy(current.program_version, PROGRAM_VERSION, sizeof(current.program_version)); @@ -83,7 +84,8 @@ catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, cons * this function is for checks, that can be performed without modification of data on disk */ static void -catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, +catchup_preflight_checks(pioDrive_i source_drive, pioDrive_i dest_drive, + PGNodeInfo *source_node_info, PGconn *source_conn, const char *source_pgdata, const char *dest_pgdata) { /* TODO @@ -156,7 +158,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, uint64 source_conn_id, source_id, dest_id; source_conn_id = get_remote_system_identifier(source_conn); - source_id = get_system_identifier(FIO_DB_HOST, source_pgdata, false); /* same as instance_config.system_identifier */ + source_id = get_system_identifier(source_drive, source_pgdata, false); /* same as instance_config.system_identifier */ if (source_conn_id != source_id) 
elog(ERROR, "Database identifiers mismatch: we connected to DB id %llu, but in \"%s\" we found id %llu", @@ -164,7 +166,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, if (current.backup_mode != BACKUP_MODE_FULL) { - dest_id = get_system_identifier(FIO_LOCAL_HOST, dest_pgdata, false); + dest_id = get_system_identifier(dest_drive, dest_pgdata, false); if (source_conn_id != dest_id) elog(ERROR, "Database identifiers mismatch: we connected to DB id %llu, but in \"%s\" we found id %llu", (long long)source_conn_id, dest_pgdata, (long long)dest_id); @@ -192,7 +194,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, RedoParams dest_redo = { 0, InvalidXLogRecPtr, 0 }; /* fill dest_redo.lsn and dest_redo.tli */ - get_redo(FIO_LOCAL_HOST, dest_pgdata, &dest_redo); + get_redo(dest_drive, dest_pgdata, &dest_redo); elog(LOG, "source.tli = %X, dest_redo.lsn = %X/%X, dest_redo.tli = %X", current.tli, (uint32) (dest_redo.lsn >> 32), (uint32) dest_redo.lsn, dest_redo.tli); @@ -363,6 +365,8 @@ typedef struct static void * catchup_thread_runner(void *arg) { + pioDrive_i drive_from = pioDriveForLocation(FIO_DB_HOST); + pioDrive_i drive_to = pioDriveForLocation(FIO_BACKUP_HOST); int i; char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; @@ -428,7 +432,8 @@ catchup_thread_runner(void *arg) } else { - backup_non_data_file(file, dest_file, from_fullpath, to_fullpath, + backup_non_data_file(drive_from, drive_to, + file, dest_file, from_fullpath, to_fullpath, arguments->backup_mode, current.parent_backup, true); } @@ -613,6 +618,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, parray *exclude_absolute_paths_list, parray *exclude_relative_paths_list) { pioDrive_i local_location = pioDriveForLocation(FIO_LOCAL_HOST); + pioDrive_i db_location = pioDriveForLocation(FIO_DB_HOST); PGconn *source_conn = NULL; PGNodeInfo source_node_info; parray *source_filelist = NULL; @@ -634,8 +640,9 @@ 
do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, err_i err = $noerr(); - source_conn = catchup_init_state(&source_node_info, source_pgdata, dest_pgdata); - catchup_preflight_checks(&source_node_info, source_conn, source_pgdata, dest_pgdata); + source_conn = catchup_init_state(db_location, &source_node_info, source_pgdata, dest_pgdata); + catchup_preflight_checks(db_location, local_location, + &source_node_info, source_conn, source_pgdata, dest_pgdata); /* we need to sort --exclude_path's for future searching */ if (exclude_absolute_paths_list != NULL) @@ -652,7 +659,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, filter_filelist(dest_filelist, dest_pgdata, exclude_absolute_paths_list, exclude_relative_paths_list, "Destination"); // fill dest_redo.lsn and dest_redo.tli - get_redo(FIO_LOCAL_HOST, dest_pgdata, &dest_redo); + get_redo(local_location, dest_pgdata, &dest_redo); elog(INFO, "syncLSN = %X/%X", (uint32) (dest_redo.lsn >> 32), (uint32) dest_redo.lsn); /* @@ -990,8 +997,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char to_fullpath[MAXPGPATH]; join_path_components(from_fullpath, source_pgdata, source_pg_control_file->rel_path); join_path_components(to_fullpath, dest_pgdata, source_pg_control_file->rel_path); - copy_pgcontrol_file(FIO_DB_HOST, from_fullpath, - FIO_LOCAL_HOST, to_fullpath, source_pg_control_file); + copy_pgcontrol_file(db_location, from_fullpath, + local_location, to_fullpath, source_pg_control_file); transfered_datafiles_bytes += source_pg_control_file->size; } @@ -1084,7 +1091,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, * In case of backup from replica we must fix minRecPoint */ if (current.from_replica) - set_min_recovery_point(source_pg_control_file, dest_pgdata, current.stop_lsn); + set_min_recovery_point(db_location, local_location, + source_pg_control_file, dest_pgdata, current.stop_lsn); /* 
close ssh session in main thread */ fio_disconnect(); diff --git a/src/checkdb.c b/src/checkdb.c index e2b2e88cf..6880b0457 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -733,7 +733,7 @@ do_amcheck(ConnectionOptions conn_opt, PGconn *conn) /* Entry point of pg_probackup CHECKDB subcommand */ void -do_checkdb(bool need_amcheck, +do_checkdb(pioDrive_i drive, bool need_amcheck, ConnectionOptions conn_opt, char *pgdata) { PGNodeInfo nodeInfo; @@ -755,7 +755,7 @@ do_checkdb(bool need_amcheck, cur_conn = pgdata_basic_setup(conn_opt, &nodeInfo); /* ensure that conn credentials and pgdata are consistent */ - check_system_identifiers(cur_conn, pgdata); + check_system_identifiers(drive, cur_conn, pgdata); /* * we don't need this connection anymore. diff --git a/src/data.c b/src/data.c index 7a4eec336..b8edf443f 100644 --- a/src/data.c +++ b/src/data.c @@ -71,7 +71,13 @@ static size_t restore_data_file_internal(pioReader_i in, pioDBWriter_i out, pgFi datapagemap_t *map, PageState *checksum_map, int checksum_version, datapagemap_t *lsn_map, BackupPageHeader2 *headers); -static err_i send_file(const char *to_fullpath, const char *from_path, bool cut_zero_tail, pgFile *file); +static void backup_non_data_file_internal(pioDrive_i drive_from, pioDrive_i drive_to, + const char *from_fullpath, + const char *to_fullpath, pgFile *file, + bool missing_ok); + +static err_i send_file(pioDrive_i drive_from, pioDrive_i drive_to, + const char *to_fullpath, const char *from_path, bool cut_zero_tail, pgFile *file); #ifdef HAVE_LIBZ /* Implementation of zlib compression method */ @@ -543,16 +549,19 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa * and make a decision about copying or skiping the file. 
*/ void -backup_non_data_file(pgFile *file, pgFile *prev_file, +backup_non_data_file(pioDrive_i drive_from, pioDrive_i drive_to, + pgFile *file, pgFile *prev_file, const char *from_fullpath, const char *to_fullpath, BackupMode backup_mode, time_t parent_backup_time, bool missing_ok) { + FOBJ_FUNC_ARP(); + err_i err; /* special treatment for global/pg_control */ if (file->external_dir_num == 0 && strcmp(file->rel_path, XLOG_CONTROL_FILE) == 0) { - copy_pgcontrol_file(FIO_DB_HOST, from_fullpath, - FIO_BACKUP_HOST, to_fullpath, file); + copy_pgcontrol_file(drive_from, from_fullpath, + drive_to, to_fullpath, file); return; } @@ -568,10 +577,18 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, * file could be deleted under our feets. * But then backup_non_data_file_internal will handle it safely */ - if (file->forkName != cfm) - file->crc = fio_get_crc32(FIO_DB_HOST, from_fullpath, false, true); - else - file->crc = fio_get_crc32_truncated(FIO_DB_HOST, from_fullpath, true); + file->crc = $i(pioGetCRC32, drive_from, .path = from_fullpath, + .truncated = file->forkName == cfm, .err = &err); + if (getErrno(err) == ENOENT) + { + elog(LOG, "File \"%s\" is not found", from_fullpath); + file->crc = 0; + file->read_size = 0; + file->write_size = 0; + file->uncompressed_size = 0; + file->write_size = FILE_NOT_FOUND; + return; + } /* ...and checksum is the same... 
*/ if (EQ_CRC32C(file->crc, prev_file->crc)) @@ -581,7 +598,7 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, } } - backup_non_data_file_internal(from_fullpath, FIO_DB_HOST, + backup_non_data_file_internal(drive_from, drive_to, from_fullpath, to_fullpath, file, missing_ok); } @@ -1150,8 +1167,8 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, * TODO: optimize remote copying */ void -backup_non_data_file_internal(const char *from_fullpath, - fio_location from_location, +backup_non_data_file_internal(pioDrive_i drive_from, pioDrive_i drive_to, + const char *from_fullpath, const char *to_fullpath, pgFile *file, bool missing_ok) { @@ -1168,7 +1185,7 @@ backup_non_data_file_internal(const char *from_fullpath, file->uncompressed_size = 0; /* backup non-data file */ - err = send_file(to_fullpath, from_fullpath, cut_zero_tail, file); + err = send_file(drive_from, drive_to, to_fullpath, from_fullpath, cut_zero_tail, file); /* handle errors */ if($haserr(err)) { @@ -1186,13 +1203,11 @@ backup_non_data_file_internal(const char *from_fullpath, } static err_i -send_file(const char *to_fullpath, const char *from_fullpath, bool cut_zero_tail, pgFile *file) { +send_file(pioDrive_i db_drive, pioDrive_i backup_drive, const char *to_fullpath, const char *from_fullpath, bool cut_zero_tail, pgFile *file) { FOBJ_FUNC_ARP(); err_i err = $noerr(); pioReadStream_i in; pioWriteCloser_i out; - pioDrive_i backup_drive = pioDriveForLocation(FIO_BACKUP_HOST); - pioDrive_i db_drive = pioDriveForLocation(FIO_DB_HOST); /* open to_fullpath */ out = $i(pioOpenRewrite, backup_drive, .path = to_fullpath, diff --git a/src/init.c b/src/init.c index bd9a857f7..633746458 100644 --- a/src/init.c +++ b/src/init.c @@ -64,7 +64,8 @@ do_init(CatalogState *catalogState) int do_add_instance(InstanceState *instanceState, InstanceConfig *instance) { - pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); + pioDrive_i backup_location = instanceState->backup_location; + 
pioDrive_i db_location = instanceState->database_location; CatalogState *catalogState = instanceState->catalog_state; err_i err; bool exists; @@ -76,9 +77,9 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) "(-D, --pgdata)"); /* Read system_identifier from PGDATA */ - instance->system_identifier = get_system_identifier(FIO_DB_HOST, instance->pgdata, false); + instance->system_identifier = get_system_identifier(db_location, instance->pgdata, false); /* Starting from PostgreSQL 11 read WAL segment size from PGDATA */ - instance->xlog_seg_size = get_xlog_seg_size(instance->pgdata); + instance->xlog_seg_size = get_xlog_seg_size(db_location, instance->pgdata); /* Ensure that all root directories already exist */ /* TODO maybe call do_init() here instead of error?*/ @@ -89,7 +90,7 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) catalogState->wal_subdir_path}; for (i = 0; i < ft_arrsz(paths); i++) { - exists = $i(pioExists, catalogState->backup_location, .path = paths[i], + exists = $i(pioExists, backup_location, .path = paths[i], .expected_kind = PIO_KIND_DIRECTORY, .err = &err); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Check instance"); @@ -105,7 +106,7 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) }; for (i = 0; i < ft_arrsz(paths); i++) { - exists = !$i(pioIsDirEmpty, catalogState->backup_location, .path = paths[i][1], + exists = !$i(pioIsDirEmpty, backup_location, .path = paths[i][1], .err = &err); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Check instance"); diff --git a/src/merge.c b/src/merge.c index 5a58e1f42..96b48d3a5 100644 --- a/src/merge.c +++ b/src/merge.c @@ -15,6 +15,7 @@ typedef struct { + InstanceState *state; parray *merge_filelist; parray *parent_chain; @@ -1347,7 +1348,8 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup, } /* Copy file to FULL backup directory into temp file */ - backup_non_data_file(tmp_file, NULL, from_fullpath, + 
backup_non_data_file(full_backup->backup_location, dest_backup->backup_location, + tmp_file, NULL, from_fullpath, to_fullpath_tmp, BACKUP_MODE_FULL, 0, false); /* sync temp file to disk */ diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 93de27ab3..e49dea00c 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -875,7 +875,7 @@ main(int argc, char *argv[]) if (wal_file_path == NULL) { /* 1st case */ - system_id = get_system_identifier(FIO_DB_HOST, current_dir, false); + system_id = get_system_identifier(instanceState->database_location, current_dir, false); join_path_components(archive_push_xlog_dir, current_dir, XLOGDIR); } else @@ -895,7 +895,7 @@ main(int argc, char *argv[]) .file1 = stripped_wal_file_path, .file2 = archive_push_xlog_dir)) { /* 2nd case */ - system_id = get_system_identifier(FIO_DB_HOST, instance_config.pgdata, false); + system_id = get_system_identifier(instanceState->database_location, instance_config.pgdata, false); /* archive_push_xlog_dir already have right value */ } else @@ -905,7 +905,7 @@ main(int argc, char *argv[]) else elog(ERROR, "Value specified to --wal_file_path is too long"); - system_id = get_system_identifier(FIO_DB_HOST, current_dir, true); + system_id = get_system_identifier(instanceState->database_location, current_dir, true); /* 3rd case if control file present -- i.e. 
system_id != 0 */ if (system_id == 0) @@ -1051,7 +1051,7 @@ main(int argc, char *argv[]) do_set_backup(instanceState, current.backup_id, set_backup_params); break; case CHECKDB_CMD: - do_checkdb(need_amcheck, + do_checkdb(pioDriveForLocation(FIO_DB_HOST), need_amcheck, instance_config.conn_opt, instance_config.pgdata); break; case NO_CMD: diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 72458d251..5d694cf9d 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -542,6 +542,7 @@ typedef struct pgSetBackupParams typedef struct { PGNodeInfo *nodeInfo; + struct InstanceState *instanceState; const char *from_root; const char *to_root; @@ -773,7 +774,7 @@ extern char** commands_args; /* in backup.c */ extern int do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, bool no_validate, bool no_sync, bool backup_logs, time_t start_time); -extern void do_checkdb(bool need_amcheck, ConnectionOptions conn_opt, +extern void do_checkdb(pioDrive_i drive, bool need_amcheck, ConnectionOptions conn_opt, char *pgdata); extern BackupMode parse_backup_mode(const char *value); extern const char *deparse_backup_mode(BackupMode mode); @@ -811,8 +812,6 @@ extern void reset_backup_id(pgBackup *backup); extern parray *get_backup_filelist(pgBackup *backup, bool strict); extern parray *read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict); extern bool tliIsPartOfHistory(const parray *timelines, TimeLineID tli); -extern DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier, - IncrRestoreMode incremental_mode); /* in remote.c */ extern void check_remote_agent_compatibility(int agent_version, @@ -1034,14 +1033,11 @@ extern void backup_data_file(pgFile *file, const char *from_fullpath, const char XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, CompressAlg calg, int clevel, uint32 checksum_version, HeaderMap *hdr_map, bool missing_ok); -extern void backup_non_data_file(pgFile 
*file, pgFile *prev_file, +extern void backup_non_data_file(pioDrive_i from, pioDrive_i to, + pgFile *file, pgFile *prev_file, const char *from_fullpath, const char *to_fullpath, BackupMode backup_mode, time_t parent_backup_time, bool missing_ok); -extern void backup_non_data_file_internal(const char *from_fullpath, - fio_location from_location, - const char *to_fullpath, pgFile *file, - bool missing_ok); extern size_t restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, const char *to_fullpath, bool use_bitmap, PageState *checksum_map, @@ -1090,17 +1086,17 @@ extern XLogRecPtr get_next_record_lsn(const char *archivedir, XLogSegNo segno, T /* in util.c */ extern TimeLineID get_current_timeline(PGconn *conn); -extern TimeLineID get_current_timeline_from_control(fio_location location, const char *pgdata_path); extern XLogRecPtr get_checkpoint_location(PGconn *conn); -extern uint64 get_system_identifier(fio_location location, const char *pgdata_path, bool safe); +extern uint64 get_system_identifier(pioDrive_i drive, const char *pgdata_path, bool safe); extern uint64 get_remote_system_identifier(PGconn *conn); -extern pg_crc32c get_pgcontrol_checksum(const char *pgdata_path); -extern uint32 get_xlog_seg_size(const char *pgdata_path); -extern void get_redo(fio_location location, const char *pgdata_path, RedoParams *redo); -extern void set_min_recovery_point(pgFile *file, const char *backup_path, +extern pg_crc32c get_pgcontrol_checksum(pioDrive_i drive, const char *pgdata_path); +extern uint32 get_xlog_seg_size(pioDrive_i drive, const char *pgdata_path); +extern void get_redo(pioDrive_i drive, const char *pgdata_path, RedoParams *redo); +extern void set_min_recovery_point(pioDrive_i drive_from, pioDrive_i drive_to, + pgFile *file, const char *backup_path, XLogRecPtr stop_backup_lsn); -extern void copy_pgcontrol_file(fio_location from_location, const char *from_fullpath, - fio_location to_location, const char *to_fullpath, pgFile *file); +extern 
void copy_pgcontrol_file(pioDrive_i drive_from, const char *from_fullpath, + pioDrive_i drive_to, const char *to_fullpath, pgFile *file); extern void time2iso(char *buf, size_t len, time_t time, bool utc); extern const char *status2str(BackupStatus status); @@ -1122,7 +1118,7 @@ extern void pretty_size(int64 size, char *buf, size_t len); extern void pretty_time_interval(double time, char *buf, size_t len); extern PGconn *pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo); -extern void check_system_identifiers(PGconn *conn, const char *pgdata); +extern void check_system_identifiers(pioDrive_i drive, PGconn *conn, const char *pgdata); extern void parse_filelist_filenames(parray *files, const char *root); /* in ptrack.c */ diff --git a/src/restore.c b/src/restore.c index 81203fbb6..0b8a5d4ca 100644 --- a/src/restore.c +++ b/src/restore.c @@ -68,6 +68,10 @@ static void restore_chain(InstanceState *instanceState, const char *pgdata_path, bool no_sync, bool cleanup_pgdata, bool backup_has_tblspc); +static DestDirIncrCompatibility check_incremental_compatibility(pioDrive_i dbdrive, + const char *pgdata, + uint64 system_identifier, + IncrRestoreMode incremental_mode); /* * Iterate over backup list to find all ancestors of the broken parent_backup * and update their status to BACKUP_STATUS_ORPHAN @@ -154,7 +158,8 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg elog(INFO, "Running incremental restore into nonempty directory: \"%s\"", instance_config.pgdata); - rc = check_incremental_compatibility(instance_config.pgdata, + rc = check_incremental_compatibility(instanceState->database_location, + instance_config.pgdata, instance_config.system_identifier, params->incremental_mode); if (rc == POSTMASTER_IS_RUNNING) @@ -480,7 +485,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg { RedoParams redo; parray *timelines = NULL; - get_redo(FIO_DB_HOST, instance_config.pgdata, &redo); + 
get_redo(instanceState->database_location, instance_config.pgdata, &redo); if (redo.checksum_version == 0) elog(ERROR, "Incremental restore in 'lsn' mode require " @@ -2085,7 +2090,8 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, * TODO: add PG_CONTROL_IS_MISSING */ DestDirIncrCompatibility -check_incremental_compatibility(const char *pgdata, uint64 system_identifier, +check_incremental_compatibility(pioDrive_i dbdrive, const char *pgdata, + uint64 system_identifier, IncrRestoreMode incremental_mode) { uint64 system_id_pgdata; @@ -2129,9 +2135,9 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, */ elog(LOG, "Trying to read pg_control file in destination directory"); - system_id_pgdata = get_system_identifier(FIO_DB_HOST, pgdata, false); + system_id_pgdata = get_system_identifier(dbdrive, pgdata, false); - if (system_id_pgdata == instance_config.system_identifier) + if (system_id_pgdata == system_identifier) system_id_match = true; else elog(WARNING, "Backup catalog was initialized for system id %llu, " @@ -2145,10 +2151,9 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, if (incremental_mode == INCR_LSN) { err_i err = $noerr(); - pioDrive_i drive = pioDriveForLocation(FIO_DB_HOST); join_path_components(backup_label, pgdata, "backup_label"); - if($i(pioExists, drive, .path = backup_label, .err = &err)) + if($i(pioExists, dbdrive, .path = backup_label, .err = &err)) { elog(WARNING, "Destination directory contains \"backup_control\" file. 
" "This does NOT mean that you should delete this file and retry, only that " diff --git a/src/util.c b/src/util.c index 2db650a8e..1b4cbd7ae 100644 --- a/src/util.c +++ b/src/util.c @@ -31,6 +31,13 @@ static const char *statusName[] = "CORRUPT" }; +static err_i +get_control_file(pioDrive_i drive, path_t pgdata_path, path_t file, + ControlFileData *control, bool safe); + +static TimeLineID +get_current_timeline_from_control(pioDrive_i drive, const char *pgdata_path); + const char * base36enc_to(long unsigned int value, char buf[static base36bufsize]) { @@ -84,16 +91,13 @@ checkControlFile(ControlFileData *ControlFile) * Write ControlFile to pg_control */ static void -writeControlFile(fio_location location, const char *path, ControlFileData *ControlFile) +writeControlFile(pioDrive_i drive, const char *path, ControlFileData *ControlFile) { char *buffer = NULL; - pioDrive_i drive; err_i err; int ControlFileSize = PG_CONTROL_FILE_SIZE; - drive = pioDriveForLocation(location); - /* copy controlFileSize */ buffer = pg_malloc0(ControlFileSize); memcpy(buffer, ControlFile, sizeof(ControlFileData)); @@ -114,36 +118,37 @@ writeControlFile(fio_location location, const char *path, ControlFileData *Contr TimeLineID get_current_timeline(PGconn *conn) { - PGresult *res; TimeLineID tli = 0; char *val; + bool ok = false; + pioDrive_i drive; res = pgut_execute_extended(conn, "SELECT timeline_id FROM pg_catalog.pg_control_checkpoint()", 0, NULL, true, true); if (PQresultStatus(res) == PGRES_TUPLES_OK) - val = PQgetvalue(res, 0, 0); - else - return get_current_timeline_from_control(FIO_DB_HOST, instance_config.pgdata); - - if (!parse_uint32(val, &tli, 0)) { - PQclear(res); - elog(WARNING, "Invalid value of timeline_id %s", val); - - /* TODO 3.0 remove it and just error out */ - return get_current_timeline_from_control(FIO_DB_HOST, instance_config.pgdata); + val = PQgetvalue(res, 0, 0); + ok = parse_uint32(val, &tli, 0); + if (!ok) + /* TODO 3.0 just error out */ + elog(WARNING, 
"Invalid value of timeline_id %s", val); } + PQclear(res); + + if (ok) + return tli; - return tli; + /* or get timeline from control data */ + drive = pioDriveForLocation(FIO_DB_HOST); + return get_current_timeline_from_control(drive, instance_config.pgdata); } static err_i -get_control_file(fio_location location, path_t pgdata_path, path_t file, +get_control_file(pioDrive_i drive, path_t pgdata_path, path_t file, ControlFileData *control, bool safe) { - pioDrive_i drive; char fullpath[MAXPGPATH]; ft_bytes_t bytes; err_i err; @@ -152,7 +157,6 @@ get_control_file(fio_location location, path_t pgdata_path, path_t file, join_path_components(fullpath, pgdata_path, file); - drive = pioDriveForLocation(location); bytes = $i(pioReadFile, drive, .path = fullpath, .err = &err); if ($haserr(err) && safe) { @@ -178,14 +182,14 @@ get_control_file(fio_location location, path_t pgdata_path, path_t file, } /* Get timeline from pg_control file */ -TimeLineID -get_current_timeline_from_control(fio_location location, const char *pgdata_path) +static TimeLineID +get_current_timeline_from_control(pioDrive_i drive, const char *pgdata_path) { FOBJ_FUNC_ARP(); ControlFileData ControlFile; err_i err; - err = get_control_file(location, pgdata_path, XLOG_CONTROL_FILE, + err = get_control_file(drive, pgdata_path, XLOG_CONTROL_FILE, &ControlFile, false); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Getting current timeline"); @@ -216,13 +220,13 @@ get_checkpoint_location(PGconn *conn) } uint64 -get_system_identifier(fio_location location, const char *pgdata_path, bool safe) +get_system_identifier(pioDrive_i drive, const char *pgdata_path, bool safe) { FOBJ_FUNC_ARP(); ControlFileData ControlFile; err_i err; - err = get_control_file(location, pgdata_path, XLOG_CONTROL_FILE, + err = get_control_file(drive, pgdata_path, XLOG_CONTROL_FILE, &ControlFile, safe); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Getting system identifier"); @@ -252,14 +256,14 @@ 
get_remote_system_identifier(PGconn *conn) } uint32 -get_xlog_seg_size(const char *pgdata_path) +get_xlog_seg_size(pioDrive_i drive, const char *pgdata_path) { #if PG_VERSION_NUM >= 110000 FOBJ_FUNC_ARP(); ControlFileData ControlFile; err_i err; - err = get_control_file(FIO_DB_HOST, pgdata_path, XLOG_CONTROL_FILE, + err = get_control_file(drive, pgdata_path, XLOG_CONTROL_FILE, &ControlFile, false); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Trying to fetch segment size"); @@ -271,13 +275,13 @@ get_xlog_seg_size(const char *pgdata_path) } pg_crc32c -get_pgcontrol_checksum(const char *pgdata_path) +get_pgcontrol_checksum(pioDrive_i drive, const char *pgdata_path) { FOBJ_FUNC_ARP(); ControlFileData ControlFile; err_i err; - err = get_control_file(FIO_BACKUP_HOST, pgdata_path, XLOG_CONTROL_FILE, + err = get_control_file(drive, pgdata_path, XLOG_CONTROL_FILE, &ControlFile, false); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Getting pgcontrol checksum"); @@ -286,13 +290,13 @@ get_pgcontrol_checksum(const char *pgdata_path) } void -get_redo(fio_location location, const char *pgdata_path, RedoParams *redo) +get_redo(pioDrive_i drive, const char *pgdata_path, RedoParams *redo) { FOBJ_FUNC_ARP(); ControlFileData ControlFile; err_i err; - err = get_control_file(location, pgdata_path, XLOG_CONTROL_FILE, + err = get_control_file(drive, pgdata_path, XLOG_CONTROL_FILE, &ControlFile, false); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Fetching redo lsn"); @@ -322,7 +326,8 @@ get_redo(fio_location location, const char *pgdata_path, RedoParams *redo) * 'as-is' is not to be trusted. 
*/ void -set_min_recovery_point(pgFile *file, const char *backup_path, +set_min_recovery_point(pioDrive_i drive_from, pioDrive_i drive_to, + pgFile *file, const char *backup_path, XLogRecPtr stop_backup_lsn) { FOBJ_FUNC_ARP(); @@ -330,7 +335,7 @@ set_min_recovery_point(pgFile *file, const char *backup_path, char fullpath[MAXPGPATH]; err_i err; - err = get_control_file(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, + err = get_control_file(drive_from, instance_config.pgdata, XLOG_CONTROL_FILE, &ControlFile, false); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Set min recovery point"); @@ -353,7 +358,7 @@ set_min_recovery_point(pgFile *file, const char *backup_path, /* overwrite pg_control */ join_path_components(fullpath, backup_path, XLOG_CONTROL_FILE); - writeControlFile(FIO_LOCAL_HOST, fullpath, &ControlFile); + writeControlFile(drive_to, fullpath, &ControlFile); /* Update pg_control checksum in backup_list */ file->crc = ControlFile.crc; @@ -363,14 +368,14 @@ set_min_recovery_point(pgFile *file, const char *backup_path, * Copy pg_control file to backup. We do not apply compression to this file. 
*/ void -copy_pgcontrol_file(fio_location from_location, const char *from_fullpath, - fio_location to_location, const char *to_fullpath, pgFile *file) +copy_pgcontrol_file(pioDrive_i drive_from, const char *from_fullpath, + pioDrive_i drive_to, const char *to_fullpath, pgFile *file) { FOBJ_FUNC_ARP(); ControlFileData ControlFile; err_i err; - err = get_control_file(from_location, from_fullpath, "", + err = get_control_file(drive_from, from_fullpath, "", &ControlFile, false); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Fetching control file"); @@ -380,7 +385,7 @@ copy_pgcontrol_file(fio_location from_location, const char *from_fullpath, file->write_size = PG_CONTROL_FILE_SIZE; file->uncompressed_size = PG_CONTROL_FILE_SIZE; - writeControlFile(to_location, to_fullpath, &ControlFile); + writeControlFile(drive_to, to_fullpath, &ControlFile); } /* diff --git a/src/validate.c b/src/validate.c index 428982763..18cecb95a 100644 --- a/src/validate.c +++ b/src/validate.c @@ -22,7 +22,7 @@ static bool skipped_due_to_lock = false; typedef struct { - pioDrive_i drive; + pioDrive_i backup_drive; const char *base_path; parray *files; bool corrupted; @@ -144,7 +144,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) { validate_files_arg *arg = &(threads_args[i]); - arg->drive = backup->backup_location; + arg->backup_drive = backup->backup_location; arg->base_path = backup->database_dir; arg->files = files; arg->corrupted = false; @@ -237,7 +237,7 @@ pgBackupValidateFiles(void *arg) validate_files_arg *arguments = (validate_files_arg *)arg; int num_files = parray_num(arguments->files); pg_crc32 crc; - pioDrive_i drive = arguments->drive; + pioDrive_i backup_drive = arguments->backup_drive; err_i err; for (i = 0; i < num_files; i++) @@ -308,7 +308,7 @@ pgBackupValidateFiles(void *arg) join_path_components(file_fullpath, arguments->base_path, file->rel_path); /* TODO: it is redundant to check file existence using stat */ - st = $i(pioStat, drive, .path = 
file_fullpath, .follow_symlink = false, + st = $i(pioStat, backup_drive, .path = file_fullpath, .follow_symlink = false, .err = &err); if ($haserr(err)) { @@ -353,12 +353,12 @@ pgBackupValidateFiles(void *arg) if (arguments->backup_version >= 20025 && strcmp(file->rel_path, XLOG_CONTROL_FILE) == 0 && file->external_dir_num == 0) - crc = get_pgcontrol_checksum(arguments->base_path); + crc = get_pgcontrol_checksum(backup_drive, arguments->base_path); else #if PG_VERSION_NUM >= 120000 { ft_assert(arguments->backup_version >= 20025); - crc = $i(pioGetCRC32, drive, .path = file_fullpath, + crc = $i(pioGetCRC32, backup_drive, .path = file_fullpath, .err = &err); } #else /* PG_VERSION_NUM < 120000 */ From 5164bb1ea0f91867edf72c5ddcc6ec27b5612832 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 04:55:20 +0300 Subject: [PATCH 251/339] remove fio_get_crc32 from restore_non_data_file --- src/data.c | 19 ++++++++++++------- src/pg_probackup.h | 4 ++-- src/restore.c | 5 +++-- 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/src/data.c b/src/data.c index b8edf443f..21a306920 100644 --- a/src/data.c +++ b/src/data.c @@ -1026,8 +1026,8 @@ restore_data_file_internal(pioReader_i in, pioDBWriter_i out, pgFile *file, uint } size_t -restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, - pgFile *dest_file, pioDBWriter_i out, const char *to_fullpath, +restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, pgFile *dest_file, + pioDrive_i out_drive, pioDBWriter_i out, const char *to_fullpath, bool already_exists) { char from_root[MAXPGPATH]; @@ -1113,11 +1113,16 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, { /* compare checksums of already existing file and backup file */ pg_crc32 file_crc; - if (tmp_file->forkName == cfm && - tmp_file->uncompressed_size > tmp_file->write_size) - file_crc = fio_get_crc32_truncated(FIO_DB_HOST, to_fullpath, false); - else - file_crc = fio_get_crc32(FIO_DB_HOST, to_fullpath, 
false, false); + bool truncated; + + truncated = tmp_file->forkName == cfm && + tmp_file->uncompressed_size > tmp_file->write_size; + + file_crc = $i(pioGetCRC32, out_drive, .path = to_fullpath, + .truncated = truncated, .err = &err); + + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Non-data file CRC32"); if (file_crc == tmp_file->crc) { diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 5d694cf9d..ad0b04de9 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1042,8 +1042,8 @@ extern void backup_non_data_file(pioDrive_i from, pioDrive_i to, extern size_t restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, const char *to_fullpath, bool use_bitmap, PageState *checksum_map, XLogRecPtr shift_lsn, datapagemap_t *lsn_map, bool use_headers); -extern size_t restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, - pgFile *dest_file, pioDBWriter_i out, const char *to_fullpath, +extern size_t restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, pgFile *dest_file, + pioDrive_i out_drive, pioDBWriter_i out, const char *to_fullpath, bool already_exists); extern bool create_empty_file(const char *to_root, fio_location to_location, pgFile *file); diff --git a/src/restore.c b/src/restore.c index 0b8a5d4ca..8734d785a 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1273,8 +1273,9 @@ restore_files(void *arg) { /* Destination file is non-data file */ arguments->restored_bytes += restore_non_data_file(arguments->parent_chain, - arguments->dest_backup, dest_file, out, to_fullpath, - already_exists); + arguments->dest_backup, dest_file, + $reduce(pioDrive, db_drive), out, + to_fullpath, already_exists); } done: From f91b2256e0e5fad53e37ec043f4d1efe478a3b99 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 05:03:45 +0300 Subject: [PATCH 252/339] remove all non-pio CRC32 staff + some additional cleanup --- src/utils/file.c | 295 +---------------------------------------------- src/utils/file.h 
| 12 -- 2 files changed, 1 insertion(+), 306 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 48fbd3f6e..d10e794b3 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -677,70 +677,9 @@ fio_sync(fio_location location, const char* path) enum { GET_CRC32_DECOMPRESS = 1, - GET_CRC32_MISSING_OK = 2, - GET_CRC32_TRUNCATED = 4 + GET_CRC32_TRUNCATED = 2 }; -/* Get crc32 of file */ -static pg_crc32 -fio_get_crc32_ex(fio_location location, const char *file_path, - bool decompress, bool missing_ok, bool truncated) -{ - if (decompress && truncated) - elog(ERROR, "Could not calculate CRC for compressed truncated file"); - - if (fio_is_remote(location)) - { - fio_header hdr; - size_t path_len = strlen(file_path) + 1; - pg_crc32 crc = 0; - hdr.cop = FIO_GET_CRC32; - hdr.handle = -1; - hdr.size = path_len; - hdr.arg = 0; - - if (decompress) - hdr.arg = GET_CRC32_DECOMPRESS; - if (missing_ok) - hdr.arg |= GET_CRC32_MISSING_OK; - if (truncated) - hdr.arg |= GET_CRC32_TRUNCATED; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, file_path, path_len), path_len); - IO_CHECK(fio_read_all(fio_stdin, &crc, sizeof(crc)), sizeof(crc)); - - return crc; - } - else - { - if (decompress) - return pgFileGetCRC32Cgz(file_path, missing_ok); - else if (truncated) - return pgFileGetCRC32CTruncated(file_path, missing_ok); - else - return pgFileGetCRC32C(file_path, missing_ok); - } -} - -/* - * Remove file or directory - * if missing_ok, then ignore ENOENT error - */ -pg_crc32 -fio_get_crc32(fio_location location, const char *file_path, - bool decompress, bool missing_ok) -{ - return fio_get_crc32_ex(location, file_path, decompress, missing_ok, false); -} - -pg_crc32 -fio_get_crc32_truncated(fio_location location, const char *file_path, - bool missing_ok) -{ - return fio_get_crc32_ex(location, file_path, false, missing_ok, true); -} - /* Remove file */ int fio_remove(fio_location location, const char* path, bool 
missing_ok) @@ -937,13 +876,6 @@ fio_iterate_pages_impl(pioDBDrive_i drive, int out, const char *path, ft_strbuf_free(&req); } -typedef struct send_file_state { - bool calc_crc; - uint32_t crc; - int64_t read_size; - int64_t write_size; -} send_file_state; - /* find page border of all-zero tail */ static size_t find_zero_tail(char *buf, size_t len) @@ -993,26 +925,6 @@ find_zero_tail(char *buf, size_t len) return len; } -static void -fio_send_file_crc(send_file_state* st, char *buf, size_t len) -{ - int64_t write_size; - - if (!st->calc_crc) - return; - - write_size = st->write_size; - while (st->read_size > write_size) - { - size_t crc_len = Min(st->read_size - write_size, sizeof(zerobuf)); - COMP_CRC32C(st->crc, zerobuf, crc_len); - write_size += crc_len; - } - - if (len > 0) - COMP_CRC32C(st->crc, buf, len); -} - /* Send open file content * On error we return FIO_ERROR message with following codes * FIO_ERROR: @@ -1032,7 +944,6 @@ fio_send_file_content_impl(int fd, int out, const char* path) char *buf = pgut_malloc(CHUNK_SIZE); size_t read_len = 0; char *errormsg = NULL; - int64_t read_size = 0; int64_t non_zero_len; /* copy content */ @@ -1083,8 +994,6 @@ fio_send_file_content_impl(int fd, int out, const char* path) hdr.arg = read_len - non_zero_len; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } - - read_size += read_len; } /* we are done, send eof */ @@ -1095,196 +1004,6 @@ fio_send_file_content_impl(int fd, int out, const char* path) return true; } -/* - * Read the local file to compute its CRC. - * We cannot make decision about file decompression because - * user may ask to backup already compressed files and we should be - * obvious about it. 
- */ -pg_crc32 -pgFileGetCRC32C(const char *file_path, bool missing_ok) -{ - FILE *fp; - pg_crc32 crc = 0; - char *buf; - size_t len = 0; - - INIT_CRC32C(crc); - - /* open file in binary read mode */ - fp = fopen(file_path, PG_BINARY_R); - if (fp == NULL) - { - if (missing_ok && errno == ENOENT) - { - FIN_CRC32C(crc); - return crc; - } - - elog(ERROR, "Cannot open file \"%s\": %s", - file_path, strerror(errno)); - } - - /* disable stdio buffering */ - setvbuf(fp, NULL, _IONBF, BUFSIZ); - buf = pgut_malloc(STDIO_BUFSIZE); - - /* calc CRC of file */ - do - { - if (interrupted) - elog(ERROR, "interrupted during CRC calculation"); - - len = fread(buf, 1, STDIO_BUFSIZE, fp); - - if (ferror(fp)) - elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); - - COMP_CRC32C(crc, buf, len); - } - while (!feof(fp)); - - FIN_CRC32C(crc); - fclose(fp); - pg_free(buf); - - return crc; -} - -/* - * Read the local file to compute CRC for it extened to real_size. - */ -pg_crc32 -pgFileGetCRC32CTruncated(const char *file_path, bool missing_ok) -{ - FILE *fp; - char *buf; - size_t len = 0; - size_t non_zero_len; - send_file_state st = {true, 0, 0, 0}; - - INIT_CRC32C(st.crc); - - /* open file in binary read mode */ - fp = fopen(file_path, PG_BINARY_R); - if (fp == NULL) - { - if (missing_ok && errno == ENOENT) - { - FIN_CRC32C(st.crc); - return st.crc; - } - - elog(ERROR, "Cannot open file \"%s\": %s", - file_path, strerror(errno)); - } - - /* disable stdio buffering */ - setvbuf(fp, NULL, _IONBF, BUFSIZ); - buf = pgut_malloc(CHUNK_SIZE); - - /* calc CRC of file */ - do - { - if (interrupted) - elog(ERROR, "interrupted during CRC calculation"); - - len = fread(buf, 1, STDIO_BUFSIZE, fp); - - if (ferror(fp)) - elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); - - non_zero_len = find_zero_tail(buf, len); - /* same trick as in fio_send_file */ - if (st.read_size + non_zero_len < PAGE_ZEROSEARCH_FINE_GRANULARITY && - st.read_size + len > 0) - { - non_zero_len = 
Min(PAGE_ZEROSEARCH_FINE_GRANULARITY, - st.read_size + len); - non_zero_len -= st.read_size; - } - if (non_zero_len) - { - fio_send_file_crc(&st, buf, non_zero_len); - st.write_size += st.read_size + non_zero_len; - } - st.read_size += len; - - } while (!feof(fp)); - - FIN_CRC32C(st.crc); - fclose(fp); - pg_free(buf); - - return st.crc; -} - -/* - * Read the local file to compute its CRC. - * We cannot make decision about file decompression because - * user may ask to backup already compressed files and we should be - * obvious about it. - */ -pg_crc32 -pgFileGetCRC32Cgz(const char *file_path, bool missing_ok) -{ - gzFile fp; - pg_crc32 crc = 0; - int len = 0; - int err; - char *buf; - - INIT_CRC32C(crc); - - /* open file in binary read mode */ - fp = gzopen(file_path, PG_BINARY_R); - if (fp == NULL) - { - if (missing_ok && errno == ENOENT) - { - FIN_CRC32C(crc); - return crc; - } - - elog(ERROR, "Cannot open file \"%s\": %s", - file_path, strerror(errno)); - } - - buf = pgut_malloc(STDIO_BUFSIZE); - - /* calc CRC of file */ - for (;;) - { - if (interrupted) - elog(ERROR, "interrupted during CRC calculation"); - - len = gzread(fp, buf, STDIO_BUFSIZE); - - if (len <= 0) - { - /* we either run into eof or error */ - if (gzeof(fp)) - break; - else - { - const char *err_str = NULL; - - err_str = gzerror(fp, &err); - elog(ERROR, "Cannot read from compressed file %s", err_str); - } - } - - /* update CRC */ - COMP_CRC32C(crc, buf, len); - } - - FIN_CRC32C(crc); - gzclose(fp); - pg_free(buf); - - return crc; -} - #if PG_VERSION_NUM < 120000 /* * Read the local file to compute its CRC using traditional algorithm. 
@@ -1779,18 +1498,6 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); break; - case FIO_GET_CRC32: - Assert((hdr.arg & GET_CRC32_TRUNCATED) == 0 || - (hdr.arg & (GET_CRC32_TRUNCATED|GET_CRC32_DECOMPRESS)) == GET_CRC32_TRUNCATED); - /* calculate crc32 for a file */ - if ((hdr.arg & GET_CRC32_DECOMPRESS)) - crc = pgFileGetCRC32Cgz(buf, (hdr.arg & GET_CRC32_MISSING_OK) != 0); - else if ((hdr.arg & GET_CRC32_TRUNCATED)) - crc = pgFileGetCRC32CTruncated(buf, (hdr.arg & GET_CRC32_MISSING_OK) != 0); - else - crc = pgFileGetCRC32C(buf, (hdr.arg & GET_CRC32_MISSING_OK) != 0); - IO_CHECK(fio_write_all(out, &crc, sizeof(crc)), sizeof(crc)); - break; case PIO_GET_CRC32: crc = $i(pioGetCRC32, drive, .path = buf, .compressed = (hdr.arg & GET_CRC32_DECOMPRESS) != 0, diff --git a/src/utils/file.h b/src/utils/file.h index 9dc55f8ca..72a4bc9cf 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -37,17 +37,14 @@ typedef enum FIO_STAT, FIO_SEND, FIO_PAGE, - FIO_GET_CRC32, /* used for incremental restore */ FIO_GET_CHECKSUM_MAP, FIO_GET_LSN_MAP, FIO_ERROR, - FIO_SEND_FILE, FIO_SEND_FILE_EOF, /* messages for closing connection */ FIO_DISCONNECT, FIO_DISCONNECTED, - FIO_LIST_DIR, FIO_REMOVE_DIR, FIO_CHECK_POSTMASTER, FIO_READLINK, @@ -159,12 +156,6 @@ extern void fio_get_agent_version(int* protocol, char* payload_buf, size_t pa /* pathname-style functions */ extern int fio_sync(fio_location location, const char* path); -extern pg_crc32 -fio_get_crc32(fio_location location, const char *file_path, - bool decompress, bool missing_ok); -extern pg_crc32 -fio_get_crc32_truncated(fio_location location, const char *file_path, - bool missing_ok); extern int fio_symlink(fio_location location, const char* target, const char* link_path, bool overwrite); extern int fio_remove(fio_location location, const char* path, bool missing_ok); @@ -177,12 +168,9 @@ struct datapagemap; /* defined in datapagemap.h */ extern struct datapagemap 
*fio_get_lsn_map(fio_location location, const char *fullpath, uint32 checksum_version, int n_blocks, XLogRecPtr horizonLsn, BlockNumber segmentno); -extern pg_crc32 pgFileGetCRC32C(const char *file_path, bool missing_ok); -extern pg_crc32 pgFileGetCRC32CTruncated(const char *file_path, bool missing_ok); #if PG_VERSION_NUM < 120000 extern pg_crc32 pgFileGetCRC32(const char *file_path, bool missing_ok); #endif -extern pg_crc32 pgFileGetCRC32Cgz(const char *file_path, bool missing_ok); extern pio_file_kind_e pio_statmode2file_kind(mode_t mode, const char* path); extern pio_file_kind_e pio_str2file_kind(const char* str, const char* path); From b7b71eef7217146ad23a93a5fcdeae1947da8972 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 05:22:14 +0300 Subject: [PATCH 253/339] make pio_helper_pioGetCRC32 to be useful for other pioDrive implementations --- src/utils/file.c | 24 ++++++++++++++++-------- src/utils/file.h | 5 +++++ 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index d10e794b3..8946731dd 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2379,22 +2379,20 @@ pioLocalDrive_pioRename(VSelf, path_t old_path, path_t new_path) return $noerr(); } -static pg_crc32 -pioLocalDrive_pioGetCRC32(VSelf, path_t path, - bool compressed, bool truncated, - err_i *err) +pg_crc32 +pio_helper_pioGetCRC32(pioOpenReadStream_i self, path_t path, + bool compressed, bool truncated, err_i *err) { FOBJ_FUNC_ARP(); - Self(pioLocalDrive); - fobj_reset_err(err); + fobj_reset_err(err); pioReadStream_i file; pioRead_i read; pioCRC32Counter* crc; - elog(VERBOSE, "Local Drive calculate crc32 for '%s', compressed=%d, truncated=%d", + elog(VERBOSE, "Calculate crc32 for '%s', compressed=%d, truncated=%d", path, compressed, truncated); - file = $(pioOpenReadStream, self, .path = path, .err = err); + file = $i(pioOpenReadStream, self, .path = path, .err = err); if ($haserr(*err)) { $iresult(*err); @@ -2417,6 +2415,16 @@ 
pioLocalDrive_pioGetCRC32(VSelf, path_t path, return pioCRC32Counter_getCRC32(crc); } +static pg_crc32 +pioLocalDrive_pioGetCRC32(VSelf, path_t path, + bool compressed, bool truncated, + err_i *err) +{ + Self(pioLocalDrive); + return pio_helper_pioGetCRC32($bind(pioOpenReadStream, self), + path, compressed, truncated, err); +} + static bool pioLocalDrive_pioIsRemote(VSelf) { diff --git a/src/utils/file.h b/src/utils/file.h index 72a4bc9cf..8ba3d235c 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -324,6 +324,11 @@ fobj_iface(pioDBDrive); extern pioDrive_i pioDriveForLocation(fio_location location); extern pioDBDrive_i pioDBDriveForLocation(fio_location location); +extern pg_crc32 +pio_helper_pioGetCRC32(pioOpenReadStream_i self, path_t path, + bool compressed, bool truncated, err_i *err); + + struct doIteratePages_params { path_t from_fullpath; pgFile *file; From 01c42a465558eac7c57e1bc9c9cdcf15498baf15 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 05:32:48 +0300 Subject: [PATCH 254/339] remove couple of FIO_LOCAL_HOST --- src/merge.c | 3 +-- src/parsexlog.c | 5 ++++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/merge.c b/src/merge.c index 96b48d3a5..72131981b 100644 --- a/src/merge.c +++ b/src/merge.c @@ -1152,8 +1152,7 @@ reorder_external_dirs(pgBackup *to_backup, parray *to_external, { char old_path[MAXPGPATH]; makeExternalDirPathByNum(old_path, externaldir_template, i + 1); - pioDrive_i drive = pioDriveForLocation(FIO_LOCAL_HOST); - $i(pioRemoveDir, drive, .root = old_path, .root_as_well = true); + $i(pioRemoveDir, to_backup->backup_location, .root = old_path, .root_as_well = true); } else if (from_num != i + 1) { diff --git a/src/parsexlog.c b/src/parsexlog.c index 9d399f3cf..5d887831b 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -407,6 +407,7 @@ validate_wal(pgBackup *backup, const char *archivedir, char last_timestamp[100], target_timestamp[100]; bool all_wal = false; + err_i err; if 
(!XRecOffIsValid(backup->start_lsn)) elog(ERROR, "Invalid start_lsn value %X/%X of backup %s", @@ -458,8 +459,10 @@ validate_wal(pgBackup *backup, const char *archivedir, * If recovery target is provided, ensure that archive files exist in * archive directory. */ - if (dir_is_empty(archivedir, FIO_LOCAL_HOST)) + if ($i(pioIsDirEmpty, backup->backup_location, .path = archivedir, .err = &err)) elog(ERROR, "WAL archive is empty. You cannot restore backup to a recovery target without WAL archive."); + else if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Check WAL archive dir"); /* * Check if we have in archive all files needed to restore backup From 098b5c6b35ff3938499b57e5f40642b4ee2628e8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 20 Dec 2022 12:10:59 +0300 Subject: [PATCH 255/339] artificial main function in main.c file To introduce C unit tests we separate `main` function into distinct file, so we may have `main` function in test files. --- Makefile | 2 ++ src/main.c | 18 ++++++++++++++++++ src/pg_probackup.c | 2 +- src/pg_probackup.h | 3 +++ 4 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 src/main.c diff --git a/Makefile b/Makefile index 1ea388927..679524f5a 100644 --- a/Makefile +++ b/Makefile @@ -41,6 +41,8 @@ OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o s OBJS += src/compatibility/file_compat.o src/compatibility/receivelog.o \ src/compatibility/streamutil.o \ src/compatibility/walmethods.o src/compatibility/file_compat10.o +# artificial file for `main` function +OBJS += src/main.o # sources borrowed from postgresql (paths are relative to pg top dir) BORROWED_H_SRC := diff --git a/src/main.c b/src/main.c new file mode 100644 index 000000000..5099d7682 --- /dev/null +++ b/src/main.c @@ -0,0 +1,18 @@ +/*------------------------------------------------------------------------- + * + * main.c: proxy main + * + * To allow linking pg_probackup.c with tests we have to have proxy `main` + * in 
separate file to call real `pbk_main` function + * + * Copyright (c) 2018-2022, Postgres Professional + * + *------------------------------------------------------------------------- + */ +#include "pg_probackup.h" + +int +main(int argc, char** argv) +{ + return pbk_main(argc, argv); +} \ No newline at end of file diff --git a/src/pg_probackup.c b/src/pg_probackup.c index e49dea00c..d54aeb706 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -287,7 +287,7 @@ static ConfigOption cmd_options[] = * Entry point of pg_probackup command. */ int -main(int argc, char *argv[]) +pbk_main(int argc, char *argv[]) { char *command = NULL; ProbackupSubcmd backup_subcmd = NO_CMD; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index ad0b04de9..56eb913c7 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -10,6 +10,9 @@ #ifndef PG_PROBACKUP_H #define PG_PROBACKUP_H +/* real main */ +extern int pbk_main(int argc, char** argv); + #include #include "postgres_fe.h" From e18f99dde513c4c00c171c9cf1bf2268e9fc8b77 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Tue, 20 Dec 2022 15:21:27 +0300 Subject: [PATCH 256/339] PBCKP-80 skip locks for non-local drives --- src/catalog.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/catalog.c b/src/catalog.c index fbf5db766..8ffd6f813 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -296,6 +296,8 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) int grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) { + FOBJ_FUNC_ARP(); + pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); char lock_file[MAXPGPATH]; FILE *fp = NULL; char buffer[256]; @@ -309,6 +311,12 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) GELF_FAILED_CLOSE = 2, } failed_action = 0; + if ($i(pioIsRemote, drive)) + { + elog(INFO, "Skipping exclusive lock on remote drive"); + return LOCK_OK; + } + join_path_components(lock_file, root_dir, BACKUP_LOCK_FILE); /* @@ 
-659,10 +667,18 @@ read_shared_lock_file(const char *lock_file) static void write_shared_lock_file(const char *lock_file, ft_arr_pid_t pids) { + FOBJ_FUNC_ARP(); + pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); FILE *fp_out = NULL; char lock_file_tmp[MAXPGPATH]; ssize_t i; + if ($i(pioIsRemote, drive)) + { + elog(INFO, "Skipping write lock on remote drive"); + return; + } + snprintf(lock_file_tmp, MAXPGPATH, "%s%s", lock_file, "tmp"); fp_out = fopen(lock_file_tmp, "w"); From a46cdfc734ac73afb54d2ff2e2208bc26d662f9e Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Tue, 20 Dec 2022 15:22:10 +0300 Subject: [PATCH 257/339] PBCKP-80 rewrite get_data_file_headers to PIO --- src/data.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/src/data.c b/src/data.c index 21a306920..22f2133ca 100644 --- a/src/data.c +++ b/src/data.c @@ -1909,8 +1909,9 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, BackupPageHeader2* get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, bool strict) { + FOBJ_FUNC_ARP(); + pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); bool success = false; - FILE *in = NULL; size_t read_len = 0; pg_crc32 hdr_crc; BackupPageHeader2 *headers = NULL; @@ -1918,6 +1919,9 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b int z_len = 0; char *zheaders = NULL; const char *errormsg = NULL; + pioReader_i reader = {0}; + size_t rc; + err_i err = $noerr(); if (backup_version < 20400) return NULL; @@ -1926,17 +1930,15 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b return NULL; /* TODO: consider to make this descriptor thread-specific */ - in = fopen(hdr_map->path, PG_BINARY_R); - - if (!in) + reader = $i(pioOpenRead, drive, .path = hdr_map->path, &err); + if ($haserr(err)) { elog(strict ? 
ERROR : WARNING, "Cannot open header file \"%s\": %s", hdr_map->path, strerror(errno)); return NULL; } - /* disable buffering for header file */ - setvbuf(in, NULL, _IONBF, 0); - if (fseeko(in, file->hdr_off, SEEK_SET)) + err = $i(pioSeek, reader, file->hdr_off); + if ($haserr(err)) { elog(strict ? ERROR : WARNING, "Cannot seek to position %llu in page header map \"%s\": %s", file->hdr_off, hdr_map->path, strerror(errno)); @@ -1953,7 +1955,8 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b zheaders = pgut_malloc(file->hdr_size); memset(zheaders, 0, file->hdr_size); - if (fread(zheaders, 1, file->hdr_size, in) != file->hdr_size) + rc = $i(pioRead, reader, .buf = ft_bytes(zheaders, file->hdr_size), .err = &err); + if ($haserr(err) || rc != file->hdr_size) { elog(strict ? ERROR : WARNING, "Cannot read header file at offset: %llu len: %i \"%s\": %s", file->hdr_off, file->hdr_size, hdr_map->path, strerror(errno)); @@ -1996,8 +1999,12 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b cleanup: pg_free(zheaders); - if (in && fclose(in)) - elog(ERROR, "Cannot close file \"%s\"", hdr_map->path); + if ($notNULL(reader)) + { + err = $i(pioClose,reader); + if ($haserr(err)) + elog(ERROR, "Cannot close file \"%s\"", hdr_map->path); + } if (!success) { @@ -2014,7 +2021,7 @@ void write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, bool is_merge) { FOBJ_FUNC_ARP(); - pioDBDrive_i drive = pioDBDriveForLocation(FIO_BACKUP_HOST); + pioDrive_i drive = pioDriveForLocation(FIO_BACKUP_HOST); err_i err = $noerr(); size_t read_len = 0; /* header compression */ From 9c8afba8dad353790591b976fdd0ada3e2a1f506 Mon Sep 17 00:00:00 2001 From: Alexander Burtsev Date: Tue, 20 Dec 2022 18:40:05 +0300 Subject: [PATCH 258/339] Cleanup CFS breadcrumbs from the CE version source code --- tests/cfs_backup_test.py | 1231 ----------------- tests/cfs_catchup_test.py | 117 -- tests/cfs_restore_test.py | 447 
------ tests/cfs_validate_backup_test.py | 24 - .../{cfs_helpers.py => data_helpers.py} | 0 tests/remote_test.py | 1 - 6 files changed, 1820 deletions(-) delete mode 100644 tests/cfs_backup_test.py delete mode 100644 tests/cfs_catchup_test.py delete mode 100644 tests/cfs_validate_backup_test.py rename tests/helpers/{cfs_helpers.py => data_helpers.py} (100%) diff --git a/tests/cfs_backup_test.py b/tests/cfs_backup_test.py deleted file mode 100644 index cd2826d21..000000000 --- a/tests/cfs_backup_test.py +++ /dev/null @@ -1,1231 +0,0 @@ -import os -import unittest -import random -import shutil - -from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - -tblspace_name = 'cfs_tblspace' - - -class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): - # --- Begin --- # - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def setUp(self): - self.backup_dir = os.path.join( - self.tmp_path, self.module_name, self.fname, 'backup') - self.node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - ptrack_enable=True, - initdb_params=['--data-checksums'], - pg_options={ - 'cfs_encryption': 'off', - 'max_wal_senders': '2', - 'shared_buffers': '200MB' - } - ) - - self.init_pb(self.backup_dir) - self.add_instance(self.backup_dir, 'node', self.node) - self.set_archiving(self.backup_dir, 'node', self.node) - - self.node.slow_start() - - self.node.safe_psql( - "postgres", - "CREATE EXTENSION ptrack") - - self.create_tblspace_in_node(self.node, tblspace_name, cfs=True) - - tblspace = self.node.safe_psql( - "postgres", - "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format( - tblspace_name)) - - self.assertIn( - tblspace_name, str(tblspace), - "ERROR: The tablespace not created " - "or it create without compressions") - - self.assertIn( - "compression=true", str(tblspace), - "ERROR: The tablespace not 
created " - "or it create without compressions") - - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - - # --- Section: Full --- # - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace(self): - """Case: Check fullbackup empty compressed tablespace""" - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Full backup status is not valid. \n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_stream(self): - """Case: Check fullbackup empty compressed tablespace with options stream""" - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Full backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - # PGPRO-1018 invalid file size - def test_fullbackup_after_create_table(self): - """Case: Make full backup after created table in the tablespace""" - if not self.enterprise: - return - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "\n ERROR: {0}\n CMD: {1}".format( - repr(e.message), - repr(self.cmd) - ) - ) - return False - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Full backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['pg_compression']), - "ERROR: File pg_compression not found in {0}".format( - os.path.join(self.backup_dir, 'node', backup_id)) - ) - - # check cfm size - cfms = find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']) - self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") - for cfm in cfms: - size = os.stat(cfm).st_size - self.assertLessEqual(size, 4096, - "ERROR: {0} is not truncated (has size {1} > 4096)".format( - cfm, size - )) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - # PGPRO-1018 invalid file size - def test_fullbackup_after_create_table_stream(self): - """ - Case: Make full backup after created table in the tablespace with option --stream - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Full backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - - # --- Section: Incremental from empty tablespace --- # - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_ptrack_after_create_table(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table - """ - - try: - self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='ptrack') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_ptrack_after_create_table_stream(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table - """ - - try: - self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='ptrack', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - self.assertFalse( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['_ptrack']), - "ERROR: _ptrack files was found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_page_after_create_table(self): - """ - Case: Make full backup before created table in the tablespace. - Make page backup after create table - """ - - try: - self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_page_doesnt_store_unchanged_cfm(self): - """ - Case: Test page backup doesn't store cfm file if table were not modified - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id_full)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertFalse( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files is found in backup dir" - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_empty_tablespace_page_after_create_table_stream(self): - """ - Case: Make full backup before created table in the tablespace. - Make page backup after create table - """ - - try: - self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - backup_id = None - try: - backup_id = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='page', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - show_backup = self.show_pb(self.backup_dir, 'node', backup_id) - self.assertEqual( - "OK", - show_backup["status"], - "ERROR: Incremental backup status is not valid. 
\n " - "Current backup status={0}".format(show_backup["status"]) - ) - self.assertTrue( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression']), - "ERROR: File pg_compression not found" - ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) - self.assertFalse( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['_ptrack']), - "ERROR: _ptrack files was found in backup dir" - ) - - # --- Section: Incremental from fill tablespace --- # - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_after_create_table_ptrack_after_create_table(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table. - Check: incremental backup will not greater as full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format('t1', tblspace_name) - ) - - backup_id_full = None - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format('t2', tblspace_name) - ) - - backup_id_ptrack = None - try: - backup_id_ptrack = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='ptrack') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - 
repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_ptrack = self.show_pb( - self.backup_dir, 'node', backup_id_ptrack) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_ptrack["data-bytes"], - "ERROR: Size of incremental backup greater than full. \n " - "INFO: {0} >{1}".format( - show_backup_ptrack["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_after_create_table_ptrack_after_create_table_stream(self): - """ - Case: Make full backup before created table in the tablespace(--stream). - Make ptrack backup after create table(--stream). - Check: incremental backup size should not be greater than full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format('t1', tblspace_name) - ) - - backup_id_full = None - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,25) i".format('t2', tblspace_name) - ) - - backup_id_ptrack = None - try: - backup_id_ptrack = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='ptrack', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', 
backup_id_full) - show_backup_ptrack = self.show_pb( - self.backup_dir, 'node', backup_id_ptrack) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_ptrack["data-bytes"], - "ERROR: Size of incremental backup greater than full. \n " - "INFO: {0} >{1}".format( - show_backup_ptrack["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_after_create_table_page_after_create_table(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table. - Check: incremental backup size should not be greater than full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format('t1', tblspace_name) - ) - - backup_id_full = None - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format('t2', tblspace_name) - ) - - backup_id_page = None - try: - backup_id_page = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb( - self.backup_dir, 'node', backup_id_page) - self.assertGreater( - show_backup_full["data-bytes"], - 
show_backup_page["data-bytes"], - "ERROR: Size of incremental backup greater than full. \n " - "INFO: {0} >{1}".format( - show_backup_page["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_multiple_segments(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table. - Check: incremental backup will not greater as full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format( - 't_heap', tblspace_name) - ) - - full_result = self.node.table_checksum("t_heap") - - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "INSERT INTO {0} " - "SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format( - 't_heap') - ) - - page_result = self.node.table_checksum("t_heap") - - try: - backup_id_page = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb( - self.backup_dir, 'node', backup_id_page) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_page["data-bytes"], - "ERROR: Size of incremental backup greater than full. 
\n " - "INFO: {0} >{1}".format( - show_backup_page["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # CHECK FULL BACKUP - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - self.restore_node( - self.backup_dir, 'node', self.node, backup_id=backup_id_full, - options=[ - "-j", "4", - "--recovery-target=immediate", - "--recovery-target-action=promote"]) - - self.node.slow_start() - self.assertEqual( - full_result, - self.node.table_checksum("t_heap"), - 'Lost data after restore') - - # CHECK PAGE BACKUP - self.node.stop() - self.node.cleanup() - shutil.rmtree( - self.get_tblspace_path(self.node, tblspace_name), - ignore_errors=True) - self.restore_node( - self.backup_dir, 'node', self.node, backup_id=backup_id_page, - options=[ - "-j", "4", - "--recovery-target=immediate", - "--recovery-target-action=promote"]) - - self.node.slow_start() - self.assertEqual( - page_result, - self.node.table_checksum("t_heap"), - 'Lost data after restore') - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_multiple_segments_in_multiple_tablespaces(self): - """ - Case: Make full backup before created table in the tablespace. - Make ptrack backup after create table. 
- Check: incremental backup will not greater as full - """ - tblspace_name_1 = 'tblspace_name_1' - tblspace_name_2 = 'tblspace_name_2' - - self.create_tblspace_in_node(self.node, tblspace_name_1, cfs=True) - self.create_tblspace_in_node(self.node, tblspace_name_2, cfs=True) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format( - 't_heap_1', tblspace_name_1)) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format( - 't_heap_2', tblspace_name_2)) - - full_result_1 = self.node.table_checksum("t_heap_1") - full_result_2 = self.node.table_checksum("t_heap_2") - - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "INSERT INTO {0} " - "SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format( - 't_heap_1') - ) - - self.node.safe_psql( - "postgres", - "INSERT INTO {0} " - "SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format( - 't_heap_2') - ) - - page_result_1 = self.node.table_checksum("t_heap_1") - page_result_2 = self.node.table_checksum("t_heap_2") - - try: - backup_id_page = self.backup_node( - self.backup_dir, 'node', self.node, backup_type='page') - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', 
backup_id_full) - show_backup_page = self.show_pb( - self.backup_dir, 'node', backup_id_page) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_page["data-bytes"], - "ERROR: Size of incremental backup greater than full. \n " - "INFO: {0} >{1}".format( - show_backup_page["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # CHECK FULL BACKUP - self.node.stop() - - self.restore_node( - self.backup_dir, 'node', self.node, - backup_id=backup_id_full, - options=[ - "-j", "4", "--incremental-mode=checksum", - "--recovery-target=immediate", - "--recovery-target-action=promote"]) - self.node.slow_start() - - self.assertEqual( - full_result_1, - self.node.table_checksum("t_heap_1"), - 'Lost data after restore') - self.assertEqual( - full_result_2, - self.node.table_checksum("t_heap_2"), - 'Lost data after restore') - - # CHECK PAGE BACKUP - self.node.stop() - - self.restore_node( - self.backup_dir, 'node', self.node, - backup_id=backup_id_page, - options=[ - "-j", "4", "--incremental-mode=checksum", - "--recovery-target=immediate", - "--recovery-target-action=promote"]) - self.node.slow_start() - - self.assertEqual( - page_result_1, - self.node.table_checksum("t_heap_1"), - 'Lost data after restore') - self.assertEqual( - page_result_2, - self.node.table_checksum("t_heap_2"), - 'Lost data after restore') - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_fullbackup_after_create_table_page_after_create_table_stream(self): - """ - Case: Make full backup before created table in the tablespace(--stream). - Make ptrack backup after create table(--stream). 
- Check: incremental backup will not greater as full - """ - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,1005000) i".format('t1', tblspace_name) - ) - - backup_id_full = None - try: - backup_id_full = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='full', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,10) i".format('t2', tblspace_name) - ) - - backup_id_page = None - try: - backup_id_page = self.backup_node( - self.backup_dir, 'node', self.node, - backup_type='page', options=['--stream']) - except ProbackupException as e: - self.fail( - "ERROR: Incremental backup failed.\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - show_backup_full = self.show_pb( - self.backup_dir, 'node', backup_id_full) - show_backup_page = self.show_pb( - self.backup_dir, 'node', backup_id_page) - self.assertGreater( - show_backup_full["data-bytes"], - show_backup_page["data-bytes"], - "ERROR: Size of incremental backup greater than full. 
\n " - "INFO: {0} >{1}".format( - show_backup_page["data-bytes"], - show_backup_full["data-bytes"] - ) - ) - - # --- Make backup with not valid data(broken .cfm) --- # - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_delete_random_cfm_file_from_tablespace_dir(self): - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - self.node.safe_psql( - "postgres", - "CHECKPOINT" - ) - - list_cmf = find_by_extensions( - [self.get_tblspace_path(self.node, tblspace_name)], - ['.cfm']) - self.assertTrue( - list_cmf, - "ERROR: .cfm-files not found into tablespace dir" - ) - - os.remove(random.choice(list_cmf)) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_delete_file_pg_compression_from_tablespace_dir(self): - os.remove( - find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression'])[0]) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_delete_random_data_file_from_tablespace_dir(self): - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - self.node.safe_psql( - "postgres", - "CHECKPOINT" - ) - - list_data_files = find_by_pattern( - [self.get_tblspace_path(self.node, tblspace_name)], - '^.*/\d+$') - 
self.assertTrue( - list_data_files, - "ERROR: Files of data not found into tablespace dir" - ) - - os.remove(random.choice(list_data_files)) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_broken_random_cfm_file_into_tablespace_dir(self): - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - list_cmf = find_by_extensions( - [self.get_tblspace_path(self.node, tblspace_name)], - ['.cfm']) - self.assertTrue( - list_cmf, - "ERROR: .cfm-files not found into tablespace dir" - ) - - corrupt_file(random.choice(list_cmf)) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_broken_random_data_file_into_tablespace_dir(self): - self.node.safe_psql( - "postgres", - "CREATE TABLE {0} TABLESPACE {1} " - "AS SELECT i AS id, MD5(i::text) AS text, " - "MD5(repeat(i::text,10))::tsvector AS tsvector " - "FROM generate_series(0,256) i".format('t1', tblspace_name) - ) - - list_data_files = find_by_pattern( - [self.get_tblspace_path(self.node, tblspace_name)], - '^.*/\d+$') - self.assertTrue( - list_data_files, - "ERROR: Files of data not found into tablespace dir" - ) - - corrupt_file(random.choice(list_data_files)) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - - @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def 
test_broken_file_pg_compression_into_tablespace_dir(self): - - corrupted_file = find_by_name( - [self.get_tblspace_path(self.node, tblspace_name)], - ['pg_compression'])[0] - - self.assertTrue( - corrupt_file(corrupted_file), - "ERROR: File is not corrupted or it missing" - ) - - self.assertRaises( - ProbackupException, - self.backup_node, - self.backup_dir, - 'node', - self.node, - backup_type='full' - ) - -# # --- End ---# - - -#class CfsBackupEncTest(CfsBackupNoEncTest): -# # --- Begin --- # -# def setUp(self): -# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" -# super(CfsBackupEncTest, self).setUp() diff --git a/tests/cfs_catchup_test.py b/tests/cfs_catchup_test.py deleted file mode 100644 index f6760b72c..000000000 --- a/tests/cfs_catchup_test.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -import unittest -import random -import shutil - -from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - - -class CfsCatchupNoEncTest(ProbackupTest, unittest.TestCase): - - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_full_catchup_with_tablespace(self): - """ - Test tablespace transfers - """ - # preparation - src_pg = self.make_simple_node( - base_dir = os.path.join(self.module_name, self.fname, 'src'), - set_replication = True - ) - src_pg.slow_start() - tblspace1_old_path = self.get_tblspace_path(src_pg, 'tblspace1_old') - self.create_tblspace_in_node(src_pg, 'tblspace1', tblspc_path = tblspace1_old_path, cfs=True) - src_pg.safe_psql( - "postgres", - "CREATE TABLE ultimate_question TABLESPACE tblspace1 AS SELECT 42 AS answer") - src_query_result = src_pg.table_checksum("ultimate_question") - src_pg.safe_psql( - "postgres", - "CHECKPOINT") - - # do full catchup with tablespace mapping - dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) - tblspace1_new_path = self.get_tblspace_path(dst_pg, 
'tblspace1_new') - self.catchup_node( - backup_mode = 'FULL', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = [ - '-d', 'postgres', - '-p', str(src_pg.port), - '--stream', - '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) - ] - ) - - # 1st check: compare data directories - self.compare_pgdata( - self.pgdata_content(src_pg.data_dir), - self.pgdata_content(dst_pg.data_dir) - ) - - # check cfm size - cfms = find_by_extensions([os.path.join(dst_pg.data_dir)], ['.cfm']) - self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") - for cfm in cfms: - size = os.stat(cfm).st_size - self.assertLessEqual(size, 4096, - "ERROR: {0} is not truncated (has size {1} > 4096)".format( - cfm, size - )) - - # make changes in master tablespace - src_pg.safe_psql( - "postgres", - "UPDATE ultimate_question SET answer = -1") - src_pg.safe_psql( - "postgres", - "CHECKPOINT") - - # run&recover catchup'ed instance - dst_options = {} - dst_options['port'] = str(dst_pg.port) - self.set_auto_conf(dst_pg, dst_options) - dst_pg.slow_start() - - # 2nd check: run verification query - dst_query_result = dst_pg.table_checksum("ultimate_question") - self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') - - # and now delta backup - dst_pg.stop() - - self.catchup_node( - backup_mode = 'DELTA', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = [ - '-d', 'postgres', - '-p', str(src_pg.port), - '--stream', - '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) - ] - ) - - # check cfm size again - cfms = find_by_extensions([os.path.join(dst_pg.data_dir)], ['.cfm']) - self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") - for cfm in cfms: - size = os.stat(cfm).st_size - self.assertLessEqual(size, 4096, - "ERROR: {0} is not truncated (has size {1} > 4096)".format( - cfm, size - )) - - # run&recover catchup'ed instance - dst_options = {} - dst_options['port'] = str(dst_pg.port) - 
self.set_auto_conf(dst_pg, dst_options) - dst_pg.slow_start() - - - # 3rd check: run verification query - src_query_result = src_pg.table_checksum("ultimate_question") - dst_query_result = dst_pg.table_checksum("ultimate_question") - self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') diff --git a/tests/cfs_restore_test.py b/tests/cfs_restore_test.py index 2fa35e71a..e69de29bb 100644 --- a/tests/cfs_restore_test.py +++ b/tests/cfs_restore_test.py @@ -1,447 +0,0 @@ -""" -restore - Syntax: - - pg_probackup restore -B backupdir --instance instance_name - [-D datadir] - [ -i backup_id | [{--time=time | --xid=xid | --lsn=lsn } [--inclusive=boolean]]][--timeline=timeline] [-T OLDDIR=NEWDIR] - [-j num_threads] [--progress] [-q] [-v] - -""" -import os -import unittest -import shutil - -from .helpers.cfs_helpers import find_by_name -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - -tblspace_name = 'cfs_tblspace' -tblspace_name_new = 'cfs_tblspace_new' - - -class CfsRestoreBase(ProbackupTest, unittest.TestCase): - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def setUp(self): - self.backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') - - self.node = self.make_simple_node( - base_dir="{0}/{1}/node".format(self.module_name, self.fname), - set_replication=True, - initdb_params=['--data-checksums'], - pg_options={ -# 'ptrack_enable': 'on', - 'cfs_encryption': 'off', - } - ) - - self.init_pb(self.backup_dir) - self.add_instance(self.backup_dir, 'node', self.node) - self.set_archiving(self.backup_dir, 'node', self.node) - - self.node.slow_start() - self.create_tblspace_in_node(self.node, tblspace_name, cfs=True) - - self.add_data_in_cluster() - - self.backup_id = None - try: - self.backup_id = self.backup_node(self.backup_dir, 'node', self.node, backup_type='full') - except ProbackupException as e: - self.fail( - "ERROR: Full backup failed \n {0} \n {1}".format( - repr(self.cmd), - 
repr(e.message) - ) - ) - - def add_data_in_cluster(self): - pass - - -class CfsRestoreNoencEmptyTablespaceTest(CfsRestoreBase): - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_empty_tablespace_from_fullbackup(self): - """ - Case: Restore empty tablespace from valid full backup. - """ - self.node.stop(["-m", "immediate"]) - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - try: - self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id) - except ProbackupException as e: - self.fail( - "ERROR: Restore failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ["pg_compression"]), - "ERROR: Restored data is not valid. pg_compression not found in tablespace dir." - ) - - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. 
\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - tblspace = self.node.safe_psql( - "postgres", - "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format(tblspace_name) - ).decode("UTF-8") - self.assertTrue( - tblspace_name in tblspace and "compression=true" in tblspace, - "ERROR: The tablespace not restored or it restored without compressions" - ) - - -class CfsRestoreNoencTest(CfsRestoreBase): - def add_data_in_cluster(self): - self.node.safe_psql( - "postgres", - 'CREATE TABLE {0} TABLESPACE {1} \ - AS SELECT i AS id, MD5(i::text) AS text, \ - MD5(repeat(i::text,10))::tsvector AS tsvector \ - FROM generate_series(0,1e5) i'.format('t1', tblspace_name) - ) - self.table_t1 = self.node.table_checksum("t1") - - # --- Restore from full backup ---# - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_old_location(self): - """ - Case: Restore instance from valid full backup to old location. - """ - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - try: - self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), - "ERROR: File pg_compression not found in tablespace dir" - ) - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. 
\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - self.node.table_checksum("t1"), - self.table_t1 - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_old_location_3_jobs(self): - """ - Case: Restore instance from valid full backup to old location. - """ - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - try: - self.restore_node(self.backup_dir, 'node', self.node, backup_id=self.backup_id, options=['-j', '3']) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - self.node.table_checksum("t1"), - self.table_t1 - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_new_location(self): - """ - Case: Restore instance from valid full backup to new location. - """ - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(self.module_name, self.fname)) - node_new.cleanup() - - try: - self.restore_node(self.backup_dir, 'node', node_new, backup_id=self.backup_id) - self.set_auto_conf(node_new, {'port': node_new.port}) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. 
\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - try: - node_new.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - node_new.table_checksum("t1"), - self.table_t1 - ) - node_new.cleanup() - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_new_location_5_jobs(self): - """ - Case: Restore instance from valid full backup to new location. - """ - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(self.module_name, self.fname)) - node_new.cleanup() - - try: - self.restore_node(self.backup_dir, 'node', node_new, backup_id=self.backup_id, options=['-j', '5']) - self.set_auto_conf(node_new, {'port': node_new.port}) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name)], ['pg_compression']), - "ERROR: File pg_compression not found in backup dir" - ) - try: - node_new.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. 
\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - node_new.table_checksum("t1"), - self.table_t1 - ) - node_new.cleanup() - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_old_location_tablespace_new_location(self): - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - os.mkdir(self.get_tblspace_path(self.node, tblspace_name_new)) - - try: - self.restore_node( - self.backup_dir, - 'node', self.node, - backup_id=self.backup_id, - options=["-T", "{0}={1}".format( - self.get_tblspace_path(self.node, tblspace_name), - self.get_tblspace_path(self.node, tblspace_name_new) - ) - ] - ) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name_new)], ['pg_compression']), - "ERROR: File pg_compression not found in new tablespace location" - ) - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. 
\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - self.node.table_checksum("t1"), - self.table_t1 - ) - - # @unittest.expectedFailure - # @unittest.skip("skip") - @unittest.skipUnless(ProbackupTest.enterprise, 'skip') - def test_restore_from_fullbackup_to_old_location_tablespace_new_location_3_jobs(self): - self.node.stop() - self.node.cleanup() - shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - - os.mkdir(self.get_tblspace_path(self.node, tblspace_name_new)) - - try: - self.restore_node( - self.backup_dir, - 'node', self.node, - backup_id=self.backup_id, - options=["-j", "3", "-T", "{0}={1}".format( - self.get_tblspace_path(self.node, tblspace_name), - self.get_tblspace_path(self.node, tblspace_name_new) - ) - ] - ) - except ProbackupException as e: - self.fail( - "ERROR: Restore from full backup failed. \n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - self.assertTrue( - find_by_name([self.get_tblspace_path(self.node, tblspace_name_new)], ['pg_compression']), - "ERROR: File pg_compression not found in new tablespace location" - ) - try: - self.node.slow_start() - except ProbackupException as e: - self.fail( - "ERROR: Instance not started after restore. 
\n {0} \n {1}".format( - repr(self.cmd), - repr(e.message) - ) - ) - - self.assertEqual( - self.node.table_checksum("t1"), - self.table_t1 - ) - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_fullbackup_to_new_location_tablespace_new_location(self): - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_fullbackup_to_new_location_tablespace_new_location_5_jobs(self): - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_ptrack(self): - """ - Case: Restore from backup to old location - """ - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_ptrack_jobs(self): - """ - Case: Restore from backup to old location, four jobs - """ - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_ptrack_new_jobs(self): - pass - -# --------------------------------------------------------- # - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_page(self): - """ - Case: Restore from backup to old location - """ - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_page_jobs(self): - """ - Case: Restore from backup to old location, four jobs - """ - pass - - # @unittest.expectedFailure - @unittest.skip("skip") - def test_restore_from_page_new_jobs(self): - """ - Case: Restore from backup to new location, four jobs - """ - pass - - -#class CfsRestoreEncEmptyTablespaceTest(CfsRestoreNoencEmptyTablespaceTest): -# # --- Begin --- # -# def setUp(self): -# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" -# super(CfsRestoreNoencEmptyTablespaceTest, self).setUp() -# -# -#class CfsRestoreEncTest(CfsRestoreNoencTest): -# # --- Begin --- # -# def setUp(self): -# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" -# super(CfsRestoreNoencTest, self).setUp() diff --git a/tests/cfs_validate_backup_test.py b/tests/cfs_validate_backup_test.py deleted file mode 100644 
index 343020dfc..000000000 --- a/tests/cfs_validate_backup_test.py +++ /dev/null @@ -1,24 +0,0 @@ -import os -import unittest -import random - -from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - -tblspace_name = 'cfs_tblspace' - - -class CfsValidateBackupNoenc(ProbackupTest,unittest.TestCase): - def setUp(self): - pass - - def test_validate_fullbackup_empty_tablespace_after_delete_pg_compression(self): - pass - - def tearDown(self): - pass - - -#class CfsValidateBackupNoenc(CfsValidateBackupNoenc): -# os.environ["PG_CIPHER_KEY"] = "super_secret_cipher_key" -# super(CfsValidateBackupNoenc).setUp() diff --git a/tests/helpers/cfs_helpers.py b/tests/helpers/data_helpers.py similarity index 100% rename from tests/helpers/cfs_helpers.py rename to tests/helpers/data_helpers.py diff --git a/tests/remote_test.py b/tests/remote_test.py index 2d36d7346..0d9894f65 100644 --- a/tests/remote_test.py +++ b/tests/remote_test.py @@ -2,7 +2,6 @@ import os from time import sleep from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.cfs_helpers import find_by_name class RemoteTest(ProbackupTest, unittest.TestCase): From d50c60b7dbc8043946b28863d603759e7ded2cc1 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 21 Dec 2022 07:56:01 +0300 Subject: [PATCH 259/339] fix build with Pg < 12 --- src/validate.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/validate.c b/src/validate.c index 18cecb95a..d3a822194 100644 --- a/src/validate.c +++ b/src/validate.c @@ -363,10 +363,10 @@ pgBackupValidateFiles(void *arg) } #else /* PG_VERSION_NUM < 120000 */ if (arguments->backup_version <= 20021 || arguments->backup_version >= 20025) - crc = $i(pioGetCRC32, drive, .path = file_fullpath, .err = &err); + crc = $i(pioGetCRC32, backup_drive, .path = file_fullpath, .err = &err); else { - ft_assert(!$i(pioIsRemote, drive)); + 
ft_assert(!$i(pioIsRemote, backup_drive)); crc = pgFileGetCRC32(file_fullpath, false); } #endif /* PG_VERSION_NUM < 120000 */ @@ -779,7 +779,7 @@ validate_tablespace_map(pgBackup *backup, bool no_validate) .err = &err); else { - ft_assert(!$i(pioIsRemote, drive)); + ft_assert(!$i(pioIsRemote, backup->backup_location)); crc = pgFileGetCRC32(map_path, false); } #endif /* PG_VERSION_NUM < 120000 */ From d29a8d4c548b796a156e47f34dcb79c8b9b4c117 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 21 Dec 2022 08:19:20 +0300 Subject: [PATCH 260/339] fix "Cleanup CFS breadcrumbs" --- tests/__init__.py | 7 ++----- tests/cfs_restore_test.py | 0 tests/external_test.py | 2 +- tests/helpers/__init__.py | 2 +- 4 files changed, 4 insertions(+), 7 deletions(-) delete mode 100644 tests/cfs_restore_test.py diff --git a/tests/__init__.py b/tests/__init__.py index c8d2c70c3..891f87447 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -4,8 +4,8 @@ from . import init_test, merge_test, option_test, show_test, compatibility_test, \ backup_test, delete_test, delta_test, restore_test, validate_test, \ retention_test, pgpro560_test, pgpro589_test, pgpro2068_test, false_positive_test, replica_test, \ - compression_test, page_test, ptrack_test, archive_test, exclude_test, cfs_backup_test, cfs_restore_test, \ - cfs_validate_backup_test, auth_test, time_stamp_test, logging_test, \ + compression_test, page_test, ptrack_test, archive_test, exclude_test, \ + auth_test, time_stamp_test, logging_test, \ locking_test, remote_test, external_test, config_test, checkdb_test, set_backup_test, incr_restore_test, \ catchup_test, CVE_2018_1058_test, time_consuming_test @@ -35,9 +35,6 @@ def load_tests(loader, tests, pattern): suite.addTests(loader.loadTestsFromModule(compatibility_test)) suite.addTests(loader.loadTestsFromModule(checkdb_test)) suite.addTests(loader.loadTestsFromModule(config_test)) - suite.addTests(loader.loadTestsFromModule(cfs_backup_test)) - 
suite.addTests(loader.loadTestsFromModule(cfs_restore_test)) - suite.addTests(loader.loadTestsFromModule(cfs_validate_backup_test)) suite.addTests(loader.loadTestsFromModule(compression_test)) suite.addTests(loader.loadTestsFromModule(delete_test)) suite.addTests(loader.loadTestsFromModule(delta_test)) diff --git a/tests/cfs_restore_test.py b/tests/cfs_restore_test.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/external_test.py b/tests/external_test.py index 53f3c5449..73ff0e0fb 100644 --- a/tests/external_test.py +++ b/tests/external_test.py @@ -2,7 +2,7 @@ import os from time import sleep from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.cfs_helpers import find_by_name +from .helpers.data_helpers import find_by_name import shutil diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py index 4ae3ef8c4..e94e6366b 100644 --- a/tests/helpers/__init__.py +++ b/tests/helpers/__init__.py @@ -1,4 +1,4 @@ -__all__ = ['ptrack_helpers', 'cfs_helpers', 'expected_errors'] +__all__ = ['ptrack_helpers', 'data_helpers', 'expected_errors'] import unittest From 76bbd0401b98628508e9f7944278e390677a81bf Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 21 Dec 2022 10:58:38 +0300 Subject: [PATCH 261/339] ... 
--- src/pg_probackup.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index d54aeb706..4c44e4ba0 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -961,11 +961,7 @@ pbk_main(int argc, char *argv[]) wal_file_path, wal_file_name, batch_size, !no_validate_wal); break; case ADD_INSTANCE_CMD: - { - int err = 0; - err = do_add_instance(instanceState, &instance_config); - return err; - } + return do_add_instance(instanceState, &instance_config); case DELETE_INSTANCE_CMD: return do_delete_instance(instanceState); case INIT_CMD: From c77b50b7b16df20ea0f352b7dc615af099cdcaea Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Sun, 18 Dec 2022 14:49:35 +0300 Subject: [PATCH 262/339] PBCKP-80 add catalog_new for tests --- src/catalog.c | 15 +++++++++++++++ src/pg_probackup.c | 8 +------- src/pg_probackup.h | 1 + 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 8ffd6f813..8701ce210 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -818,6 +818,21 @@ pgBackupGetBackupMode(pgBackup *backup, bool show_color) return backupModes[backup->backup_mode]; } +/* Build `CatalogState' from `backup_path' */ +CatalogState * +catalog_new(const char *backup_path) +{ + CatalogState *catalogState = pgut_new0(CatalogState); + strncpy(catalogState->catalog_path, backup_path, MAXPGPATH); + join_path_components(catalogState->backup_subdir_path, + catalogState->catalog_path, BACKUPS_DIR); + join_path_components(catalogState->wal_subdir_path, + catalogState->catalog_path, WAL_SUBDIR); + catalogState->backup_location = pioDriveForLocation(FIO_BACKUP_HOST); + + return catalogState; +} + /* * Create list of instances in given backup catalog. 
* diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 4c44e4ba0..5c55bd9dd 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -474,13 +474,7 @@ pbk_main(int argc, char *argv[]) if (!is_absolute_path(backup_path)) elog(ERROR, "-B, --backup-path must be an absolute path"); - catalogState = pgut_new0(CatalogState); - strncpy(catalogState->catalog_path, backup_path, MAXPGPATH); - join_path_components(catalogState->backup_subdir_path, - catalogState->catalog_path, BACKUPS_DIR); - join_path_components(catalogState->wal_subdir_path, - catalogState->catalog_path, WAL_SUBDIR); - catalogState->backup_location = pioDriveForLocation(FIO_BACKUP_HOST); + catalogState = catalog_new(backup_path); } /* backup_path is required for all pg_probackup commands except help, version, checkdb and catchup */ diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 56eb913c7..a0b34e870 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -888,6 +888,7 @@ extern parray* get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli #define PAGE_LSN_FROM_FUTURE (-6) /* in catalog.c */ +extern CatalogState * catalog_new(const char *backup_path); extern pgBackup *read_backup(pioDrive_i drive, const char *root_dir); extern pgBackup *readBackupControlFile(pioDrive_i drive, const char *path); extern void write_backup(pgBackup *backup, bool strict); From 7365f24bc545441d13578bda5d2a29d2d547ecfb Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Mon, 19 Dec 2022 12:28:16 +0300 Subject: [PATCH 263/339] PBCKP-80 Add unit tests --- Makefile | 18 +- unit/CUnit-Run.dtd | 36 ++++ unit/CUnit-Run.xsl | 173 ++++++++++++++++++ unit/Makefile | 26 +++ unit/pg_control.TEST | Bin 0 -> 8192 bytes unit/pgpbkp.c | 37 ++++ unit/pgunit.c | 183 +++++++++++++++++++ unit/pgunit.h | 27 +++ unit/test_pio.c | 408 ++++++++++++++++++++++++++++++++++++++++++ unit/test_probackup.c | 86 +++++++++ unit/unit_pub.sh | 9 + 11 files changed, 995 insertions(+), 8 deletions(-) create mode 100644 
unit/CUnit-Run.dtd create mode 100644 unit/CUnit-Run.xsl create mode 100644 unit/Makefile create mode 100644 unit/pg_control.TEST create mode 100644 unit/pgpbkp.c create mode 100644 unit/pgunit.c create mode 100644 unit/pgunit.h create mode 100644 unit/test_pio.c create mode 100644 unit/test_probackup.c create mode 100644 unit/unit_pub.sh diff --git a/Makefile b/Makefile index 679524f5a..c588c44c9 100644 --- a/Makefile +++ b/Makefile @@ -31,18 +31,16 @@ top_pbk_srcdir := $(dir $(realpath $(firstword $(MAKEFILE_LIST)))) PROGRAM := pg_probackup # pg_probackup sources -OBJS := src/utils/configuration.o src/utils/json.o src/utils/logger.o \ +PGPOBJS := src/utils/configuration.o src/utils/json.o src/utils/logger.o \ src/utils/parray.o src/utils/pgut.o src/utils/thread.o src/utils/remote.o src/utils/file.o -OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o src/data.o \ +PGPOBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o src/data.o \ src/delete.o src/dir.o src/help.o src/init.o src/merge.o \ src/parsexlog.o src/ptrack.o src/pg_probackup.o src/restore.o src/show.o src/stream.o \ src/util.o src/validate.o src/datapagemap.o src/catchup.o \ src/compatibility/pg-11.o src/utils/simple_prompt.o -OBJS += src/compatibility/file_compat.o src/compatibility/receivelog.o \ +PGPOBJS += src/compatibility/file_compat.o src/compatibility/receivelog.o \ src/compatibility/streamutil.o \ src/compatibility/walmethods.o src/compatibility/file_compat10.o -# artificial file for `main` function -OBJS += src/main.o # sources borrowed from postgresql (paths are relative to pg top dir) BORROWED_H_SRC := @@ -52,18 +50,21 @@ BORROWED_C_SRC := \ BORROW_DIR := src/borrowed BORROWED_H := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_H_SRC))) BORROWED_C := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_C_SRC))) -OBJS += $(patsubst %.c, %.o, $(BORROWED_C)) +PGPOBJS += $(patsubst %.c, %.o, $(BORROWED_C)) EXTRA_CLEAN := $(BORROWED_H) 
$(BORROWED_C) $(BORROW_DIR) borrowed.mk -OBJS += src/fu_util/impl/ft_impl.o src/fu_util/impl/fo_impl.o +PGPOBJS += src/fu_util/impl/ft_impl.o src/fu_util/impl/fo_impl.o # off-source build support ifneq ($(abspath $(CURDIR))/, $(top_pbk_srcdir)) VPATH := $(top_pbk_srcdir) endif +# artificial file for `main` function +OBJS += $(PGPOBJS) src/main.o + # standard PGXS stuff -# all OBJS must be defined above this +# all PGPOBJS must be defined above this ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) @@ -114,3 +115,4 @@ include $(top_pbk_srcdir)/packaging/Makefile.pkg include $(top_pbk_srcdir)/packaging/Makefile.repo include $(top_pbk_srcdir)/packaging/Makefile.test +include $(top_pbk_srcdir)/unit/Makefile \ No newline at end of file diff --git a/unit/CUnit-Run.dtd b/unit/CUnit-Run.dtd new file mode 100644 index 000000000..e0f56b8a3 --- /dev/null +++ b/unit/CUnit-Run.dtd @@ -0,0 +1,36 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/unit/CUnit-Run.xsl b/unit/CUnit-Run.xsl new file mode 100644 index 000000000..b46b729fc --- /dev/null +++ b/unit/CUnit-Run.xsl @@ -0,0 +1,173 @@ + + + + + + + CUnit - Automated Test Run Summary Report + + + + + + + + + +
+

+ CUnit - A Unit testing framework for C.
+ http://cunit.sourceforge.net/ +

+
+
+ + +

+

+

Automated Test Run Results

+
+ + + + + + + + +
+
+ + + + + + + + + + + + Running Suite + + + + + + + + + + + + + Running Group + + + + + + + + + + + + + Running test ... + + Passed + + + + + + + + Running test ... + + Failed + + + + + + + + + + + + + + + +
File Name + + Line Number + +
Condition + +
+ + +
+ + + + + Running Suite ... + + + + + + + + + + + Running Group ... + + + + + + + + +

+ + + + + + + + + + + + + + + + + + + + + + +
Cumulative Summary for Run
Type Total Run Succeeded Failed Inactive
+ + + +

+


+
+
+ +
diff --git a/unit/Makefile b/unit/Makefile new file mode 100644 index 000000000..b4a2b5015 --- /dev/null +++ b/unit/Makefile @@ -0,0 +1,26 @@ +APPS=unit/test_unit_pio unit/test_unit_probackup + +test_unit: $(APPS) + + +TEST_FILES=unit/test_probackup.o unit/test_pio.o + +unit/test_unit_pio: LIBS += -lcunit +unit/test_unit_pio: $(PGPOBJS) unit/pgunit.o unit/test_pio.o + $(CC) $(CFLAGS) $^ $(LDFLAGS) $(LIBS) $(PG_LIBS_INTERNAL) -o $@ + +unit/test_unit_probackup: LIBS += -lcunit +unit/test_unit_probackup: $(PGPOBJS) unit/pgunit.o unit/test_probackup.o + $(CC) $(CFLAGS) $^ $(LDFLAGS) $(LIBS) $(PG_LIBS_INTERNAL) -o $@ + +test_unit_run: test_unit + (cd unit; ./test_unit_pio) + (cd unit; ./test_unit_probackup) + +test_unit_pub: test_unit_run + (cd unit ; /bin/sh unit_pub.sh) + +clean: test_clean + +test_clean: + rm -rf $(APPS) unit/*.o *~ *.gcda *.gcno *.html *.xml pb.info gmon.out report/ diff --git a/unit/pg_control.TEST b/unit/pg_control.TEST new file mode 100644 index 0000000000000000000000000000000000000000..824236718a41dc272fbca172795ed3089de3cd99 GIT binary patch literal 8192 zcmeIuF%Q8|5C`yUMM-Hz2*yStVU^fxlK22(Y*LADz+^zeW;XN#@Bu6xBxW7ODmI(J z7jUP2Fio{?tr3rAd6~%-H_6hISYO*+a-`yqd@^Y+pQMa3yz!vC zC97&yHobYN@Z6DpJWnpIebHL37DK~1^ut^8^s3t-k0Brc0SG_<0uX=z1Rwwb2tWV= d5P$##AOHafKmY;|fB*y_009U<00Mt4@CI(O9z_5E literal 0 HcmV?d00001 diff --git a/unit/pgpbkp.c b/unit/pgpbkp.c new file mode 100644 index 000000000..43be09c4a --- /dev/null +++ b/unit/pgpbkp.c @@ -0,0 +1,37 @@ +#include +#include +#include + +#include + +#include +#include + +#include + +#include "pgunit.h" + +/* Emulate pgprobackup */ +bool show_color = true; +ShowFormat show_format = SHOW_PLAIN; +const char *PROGRAM_NAME = NULL; +const char *PROGRAM_NAME_FULL = NULL; +const char *PROGRAM_FULL_PATH = NULL; +pid_t my_pid = 0; +bool is_archive_cmd = false; +bool remote_agent = false; +time_t current_time = 0; +char *replication_slot = NULL; +pgBackup current; +bool perm_slot = false; +bool temp_slot = false; +bool 
progress = false; +int num_threads = 1; +bool delete_wal = false; +bool merge_expired = false; +bool smooth_checkpoint; +bool skip_block_validation = false; +bool dry_run = false; +bool delete_expired = false; + +/***********************/ diff --git a/unit/pgunit.c b/unit/pgunit.c new file mode 100644 index 000000000..5daac7105 --- /dev/null +++ b/unit/pgunit.c @@ -0,0 +1,183 @@ +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "pgunit.h" + +pioDrive_i drive; +pioDBDrive_i dbdrive; +pioDrive_i cloud_drive; +pioDBDrive_i local_drive; + +int should_be_remote; + +void +init_test_drives() +{ + local_drive = pioDBDriveForLocation(FIO_LOCAL_HOST); +} + +int +USE_LOCAL() +{ + drive = $reduce(pioDrive, local_drive); + dbdrive = local_drive; + should_be_remote = 0; + printf("USE_LOCAL\n"); + return 0; +} + +static int +clean_basic_suite() +{ + return 0; +} + +#define FNAMES "abcdefghijklmnopqrstuvwxyz0123456789" + +static int rand_init=0; +char * +random_path(void) +{ + char name[MAXPGPATH]; + + if(!rand_init) { + srand(time(NULL)); + rand_init = 1; + } + + int len = 3 + rand() % 20; + int fnlen = strlen(FNAMES); + name[0]=0; + snprintf(name, MAXPGPATH, "/tmp/%d_", getpid()); + int i; + int l=strlen(name); + for(i=l; i < len+l; ++i) + { + name[i] = FNAMES[rand()%fnlen]; + } + name[i] = 0; + + return strdup(name); +} + +char * +random_name(void) +{ + char name[MAXPGPATH]; + + if(!rand_init) { + srand(time(NULL)); + rand_init = 1; + } + + int len = 3 + rand() % 10; + int fnlen = strlen(FNAMES); + int i; + for(i=0;i=0); + int fdout = open(to, O_CREAT|O_RDWR|O_TRUNC, FILE_PERMISSION); + CU_ASSERT_FATAL(fdout>=0); + while(1) + { + char buf[BUFSZ]; + int rc = read(fdin, buf, BUFSZ); + CU_ASSERT_FATAL(rc>=0); + if(rc==0) break; + int written = write(fdout, buf, rc); + CU_ASSERT_FATAL(written == rc); + } + close(fdin); + fsync(fdout); + close(fdout); +} + +void +init_fake_server(const char *path) +{ + char global[8192]; + 
snprintf(global, 8192, "%s/global", path); + int rc = mkdir(path, DIR_PERMISSION); + CU_ASSERT_FATAL(rc == 0); + rc = mkdir(global, DIR_PERMISSION); + CU_ASSERT_FATAL(rc == 0); + char global2[MAXPGPATH]; + snprintf(global2, MAXPGPATH, "%s/pg_control", global); + copy_file("pg_control.TEST", global2); +} + +void +pbk_add_tests(int (*init)(void), const char *suite_name, PBK_test_description *tests) +{ + CU_pSuite pSuite; + int i; + + pSuite = CU_add_suite(suite_name, init, clean_basic_suite); + if(pSuite==NULL) + { + fprintf(stderr, "Can't add a suite %s\n", suite_name); + CU_cleanup_registry(); + abort(); + } + + for(i = 0; tests[i].name; ++i) + { + if(CU_add_test(pSuite, tests[i].name, tests[i].foo) == NULL) + { + fprintf(stderr, "Can't add test %s.%s\n", suite_name, tests[i].name); + CU_cleanup_registry(); + abort(); + } + } +} + +void +pio_write(pioDrive_i drive, path_t path, const char *data) +{ + FOBJ_FUNC_ARP(); + err_i err=$noerr(); + err = $i(pioWriteFile, drive, .path = path, .content = ft_bytes((char *)data, strlen(data)), .binary = true); + CU_ASSERT(!$haserr(err)); +} + +bool +pio_exists(pioDrive_i drive, path_t path) +{ + FOBJ_FUNC_ARP(); + err_i err=$noerr(); + + bool exists = $i(pioExists, drive, .path = path, .expected_kind = PIO_KIND_REGULAR, &err); + if ($haserr(err)) + fprintf(stderr, "pio_exists: %s\n", $errmsg(err)); + CU_ASSERT(!$haserr(err)); + return exists; +} +bool +pio_exists_d(pioDrive_i drive, path_t path) +{ + FOBJ_FUNC_ARP(); + err_i err=$noerr(); + + bool exists = $i(pioExists, drive, .path = path, .expected_kind = PIO_KIND_DIRECTORY, &err); + CU_ASSERT(!$haserr(err)); + return exists; +} diff --git a/unit/pgunit.h b/unit/pgunit.h new file mode 100644 index 000000000..fd86ca22d --- /dev/null +++ b/unit/pgunit.h @@ -0,0 +1,27 @@ +#ifdef __pgunit_h__ +#error "Double #include of pgunit.h" +#endif + +#define __pgunit_h__ 1 + +#define BUFSZ 8192 + +typedef struct { + const char *name; + void (*foo)(void); +} PBK_test_description; + +extern 
pioDrive_i drive; +extern pioDBDrive_i dbdrive; +extern int should_be_remote; + +void init_test_drives(void); +int USE_LOCAL(void); +char *random_path(void); +char *random_name(void); +void pbk_add_tests(int (*init)(void), const char *sub_name, PBK_test_description *tests); +void pio_write(pioDrive_i drive, path_t name, const char *data); +bool pio_exists(pioDrive_i drive, path_t path); +bool pio_exists_d(pioDrive_i drive, path_t path); +void copy_file(const char *from, const char *to); +void init_fake_server(const char *path); diff --git a/unit/test_pio.c b/unit/test_pio.c new file mode 100644 index 000000000..675af577c --- /dev/null +++ b/unit/test_pio.c @@ -0,0 +1,408 @@ +#include +#include +#include + +#include +#include + +#include +#include + +#include "pgunit.h" + +#define TEST_STR "test\n" +#define BUFSZ 8192 + +#define XXX_STR "XXX" + +static void +test_pioStat() +{ + FOBJ_FUNC_ARP(); + err_i err = $noerr(); + char *path = random_path(); + + err = $i(pioWriteFile, drive, .path = path, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + CU_ASSERT(!$haserr(err)); + time_t now = time(NULL); + + pio_stat_t pst = $i(pioStat, drive, .path = path, .follow_symlink = false, .err = &err); + + CU_ASSERT(!$haserr(err)); + + CU_ASSERT(pst.pst_kind == PIO_KIND_REGULAR); + CU_ASSERT(pst.pst_mode == FILE_PERMISSION); + CU_ASSERT(abs(now-pst.pst_mtime) < 2); + CU_ASSERT(pst.pst_size == 5); +} + +static void +test_pioRemove() +{ + FOBJ_FUNC_ARP(); + + char *path = random_path(); + pio_write(drive, path, TEST_STR); + CU_ASSERT(pio_exists(drive, path)); + + err_i err = $i(pioRemove, drive, .path = path, .missing_ok = false); + + CU_ASSERT(!$haserr(err)); + + CU_ASSERT(!pio_exists(drive, path)); +} + +static void +test_pioRename() +{ + FOBJ_FUNC_ARP(); + + char *name = random_path(); + char *another_name = random_path(); + + pio_write(drive, name, TEST_STR); + CU_ASSERT(pio_exists(drive, name)); + + err_i err = $i(pioRename, dbdrive, .old_path = name, .new_path 
= another_name); + CU_ASSERT(!$haserr(err)); + + CU_ASSERT(!pio_exists(drive, name)); + CU_ASSERT(pio_exists(drive, another_name)); +} + +static void +test_pioExists() +{ + FOBJ_FUNC_ARP(); + + err_i err = $noerr(); + bool exists = $i(pioExists, drive, .path = "/", .expected_kind = PIO_KIND_DIRECTORY, &err); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(exists); + + const char *path = random_path(); + err = $noerr(); + exists = $i(pioExists, drive, .path = path, .expected_kind = PIO_KIND_REGULAR, &err); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(!exists); + + char *name = random_path(); + pio_write(drive, name, TEST_STR); + exists = $i(pioExists, drive, .path = name, .expected_kind = PIO_KIND_REGULAR, &err); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(exists); +} + +static void +test_pioIsRemote() +{ + FOBJ_FUNC_ARP(); + + if(should_be_remote) { + CU_ASSERT( $i(pioIsRemote, drive) ); + } else { + CU_ASSERT( !$i(pioIsRemote, drive) ); + } +} + +static void +test_pioWriteFile() +{ + FOBJ_FUNC_ARP(); + + err_i err = $noerr(); + char *path = random_path(); + + CU_ASSERT(!pio_exists(drive, path)); + + err = $i(pioWriteFile, drive, .path = path, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + CU_ASSERT(!$haserr(err)); + + CU_ASSERT(pio_exists(drive, path)); + + ft_bytes_t result = $i(pioReadFile, drive, .path = path, .binary = true, &err); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(result.len==strlen(TEST_STR)); + CU_ASSERT(!strncmp(result.ptr, TEST_STR, strlen(TEST_STR))); + + ft_bytes_free(&result); + + free(path); +} + +static void +test_pioOpenRead() +{ + FOBJ_FUNC_ARP(); + err_i err = $noerr(); + char *path = random_path(); + pio_write(drive, path, TEST_STR); + + CU_ASSERT(pio_exists(drive, path)); + + pioReader_i reader = $i(pioOpenRead, drive, .path = path, &err); + CU_ASSERT(!$haserr(err)); + char B0[8192]; + ft_bytes_t buf = ft_bytes(B0, 8192); + size_t ret = $i(pioRead, reader, .buf = buf, &err); + CU_ASSERT(!$haserr(err)); + 
CU_ASSERT(ret==strlen(TEST_STR)); + CU_ASSERT(!strncmp(buf.ptr, TEST_STR, strlen(TEST_STR))); + err = $i(pioSeek, reader, 0); + CU_ASSERT(!$haserr(err)); + ft_bytes_t buf2 = ft_bytes(B0+100, 8192); + ret = $i(pioRead, reader, .buf = buf2, &err); + CU_ASSERT(ret==strlen(TEST_STR)); + CU_ASSERT(!strncmp(buf2.ptr, TEST_STR, strlen(TEST_STR))); + + $i(pioClose, reader); + + //ft_bytes_free(&result); + + free(path); +} + +static void +test_pioOpenReadStream() +{ + FOBJ_FUNC_ARP(); + err_i err = $noerr(); + char *path = random_path(); + + pioReadStream_i stream; + /* Crash in pioCloudDrive */ + stream = $i(pioOpenReadStream, drive, .path = path, &err); + CU_ASSERT($haserr(err)); + + pio_write(drive, path, TEST_STR); + + stream = $i(pioOpenReadStream, drive, .path = path, &err); + CU_ASSERT(!$haserr(err)); + + char B0[8192]; + ft_bytes_t buf = ft_bytes(B0, 8192); + size_t ret = $i(pioRead, stream, .buf= buf, &err); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(ret==strlen(TEST_STR)); + CU_ASSERT(!strncmp(buf.ptr, TEST_STR, strlen(TEST_STR))); + $i(pioClose, stream); + free(path); +} + +static void +test_pioGetCRC32() +{ + FOBJ_FUNC_ARP(); + err_i err = $noerr(); + char *path = random_path(); + pg_crc32 crc; + +#if 0 + //crashes. 
should return errno in err + crc = $i(pioGetCRC32, drive, .path = path, .compressed = false, .err = &err); + CU_ASSERT($haserr(err)); +#endif + pio_write(drive, path, TEST_STR); + crc = $i(pioGetCRC32, drive, .path = path, .compressed = false, .err = &err); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(crc==0xFA94FDDF) +} + + +static void +test_pioMakeDir() +{ + FOBJ_FUNC_ARP(); + + char *path = random_path(); + + CU_ASSERT(!pio_exists(drive, path)); + err_i err = $i(pioMakeDir, drive, .path = path, .mode = DIR_PERMISSION, .strict = true); + CU_ASSERT(!$haserr(err)); + + CU_ASSERT(pio_exists_d(drive, path)); +} + +static void +test_pioMakeDirWithParent() +{ + FOBJ_FUNC_ARP(); + char child[MAXPGPATH]; + char *parent = random_path(); + CU_ASSERT(!pio_exists(drive, parent)); + snprintf(child, MAXPGPATH, "%s/TEST", parent); + + err_i err = $i(pioMakeDir, drive, .path = child, .mode = DIR_PERMISSION, .strict = true); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(pio_exists_d(drive, parent)); + CU_ASSERT(pio_exists_d(drive, child)); + + free(parent); +} + +static void +test_pioRemoveDir() +{ + FOBJ_FUNC_ARP(); + char *path = random_path(); + err_i err = $noerr(); + char path2[8192]; + snprintf(path2, 8192, "%s/%s", path, "sample.txt"); + + CU_ASSERT(!pio_exists(drive, path)); + err = $i(pioMakeDir, drive, .path = path, .mode = DIR_PERMISSION, .strict = true); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(pio_exists_d(drive, path)); + + err = $i(pioWriteFile, drive, .path = path2, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(pio_exists(drive, path2)); + $i(pioRemoveDir, drive, path, .root_as_well=false); + CU_ASSERT(!pio_exists(drive, path2)); + CU_ASSERT(pio_exists_d(drive, path)); +} + +static void +test_pioFilesAreSame() +{ + FOBJ_FUNC_ARP(); + + err_i err = $noerr(); + char *path1 = random_path(); + char *path2 = random_path(); + + CU_ASSERT(!pio_exists(drive, path1)); + CU_ASSERT(!pio_exists(drive, path2)); + + err = 
$i(pioWriteFile, drive, .path = path1, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(pio_exists(drive, path1)); + + err = $i(pioWriteFile, drive, .path = path2, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(pio_exists(drive, path2)); + + ft_bytes_t result1 = $i(pioReadFile, drive, .path = path1, .binary = true, &err); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(result1.len==strlen(TEST_STR)); + CU_ASSERT(!strncmp(result1.ptr, TEST_STR, strlen(TEST_STR))); + + ft_bytes_t result2 = $i(pioReadFile, drive, .path = path2, .binary = true, &err); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(result2.len==strlen(TEST_STR)); + CU_ASSERT(!strncmp(result2.ptr, TEST_STR, strlen(TEST_STR))); + + CU_ASSERT(result1.len == result2.len); + CU_ASSERT(!memcmp(result1.ptr, result2.ptr, result1.len)); + + ft_bytes_free(&result1); + ft_bytes_free(&result2); + + free(path1); + free(path2); +} + +static void +test_pioReadFile() +{ + FOBJ_FUNC_ARP(); + + err_i err = $noerr(); + char *path = random_path(); + + CU_ASSERT(!pio_exists(drive, path)); + + err = $i(pioWriteFile, drive, .path = path, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + CU_ASSERT(!$haserr(err)); + + CU_ASSERT(pio_exists(drive, path)); + + ft_bytes_t result = $i(pioReadFile, drive, .path = path, .binary = true, &err); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(result.len==strlen(TEST_STR)); + CU_ASSERT(!strncmp(result.ptr, TEST_STR, strlen(TEST_STR))); + + ft_bytes_free(&result); + + free(path); +} + +static void +test_pioOpenRewrite() +{ + FOBJ_FUNC_ARP(); + err_i err = $noerr(); + char *path = random_path(); + pio_write(drive, path, TEST_STR); + + CU_ASSERT(pio_exists(drive, path)); + + pioWriteCloser_i writer = $i(pioOpenRewrite, drive, .path = path, + .permissions = FILE_PERMISSION, .binary = true, + .use_temp=true, .sync = true, .err = &err); + CU_ASSERT(!$haserr(err)); + char B0[8192]; + 
snprintf(B0, 8192, XXX_STR); + ft_bytes_t buf = ft_bytes(B0, strlen(B0)); + err = $i(pioWrite, writer, .buf = buf); + CU_ASSERT(!$haserr(err)); + $i(pioClose, writer); + + ft_bytes_t result = $i(pioReadFile, drive, .path = path, .binary = true, &err); + CU_ASSERT(strlen(XXX_STR) == result.len); + CU_ASSERT(!memcmp(XXX_STR, result.ptr, result.len)); + ft_bytes_free(&result); + + free(path); +} + +PBK_test_description PIO_DRIVE_TESTS[] = { + {"Test pioOpenRead", test_pioOpenRead}, + {"Test pioOpenReadStream", test_pioOpenReadStream}, + {"Test pioStat", test_pioStat}, + {"Test pioRemove", test_pioRemove}, + {"Test pioRename", test_pioRename}, + {"Test pioExists", test_pioExists}, + {"Test pioGetCRC32", test_pioGetCRC32}, + {"Test pioIsRemote", test_pioIsRemote}, + {"Test pioMakeDir", test_pioMakeDir}, + {"Test pioMakeDirWithParent", test_pioMakeDirWithParent}, + {"Test pioRemoveDir", test_pioRemoveDir}, + {"Test pioFilesAreSame", test_pioFilesAreSame}, + {"Test pioReadFile", test_pioReadFile}, + {"Test pioWriteFile", test_pioWriteFile}, + {"Test pioOpenRewrite", test_pioOpenRewrite}, + {NULL, NULL} +}; + +PBK_test_description PIO_DB_DRIVE_TESTS[] = { + {NULL, NULL} +}; + +int +main(int argc, char *argv[]) +{ + ft_init_log(elog_ft_log); + fobj_init(); + FOBJ_FUNC_ARP(); + init_pio_objects(); + + init_test_drives(); + if(CUE_SUCCESS != CU_initialize_registry()) + return CU_get_error(); + + pbk_add_tests(USE_LOCAL, "Local pioDrive", PIO_DRIVE_TESTS); + pbk_add_tests(USE_LOCAL, "LOcal pioDBDrive", PIO_DB_DRIVE_TESTS); + + CU_list_tests_to_file(); + + CU_basic_set_mode(CU_BRM_VERBOSE); + CU_set_output_filename("test_pio"); + + CU_basic_run_tests(); + CU_automated_run_tests(); + + CU_cleanup_registry(); + + return CU_get_error(); +} diff --git a/unit/test_probackup.c b/unit/test_probackup.c new file mode 100644 index 000000000..e16d80aac --- /dev/null +++ b/unit/test_probackup.c @@ -0,0 +1,86 @@ +#include +#include +#include + +#include +#include + +#include +#include + 
+#include "pgunit.h" + +static void +test_do_init() +{ + FOBJ_FUNC_ARP(); + char *backup_path = random_path(); + + CatalogState *catalogState = catalog_new(backup_path); + + int rc = do_init(catalogState); + CU_ASSERT(rc == 0); +} + +static void +test_do_add_instance() +{ + //FOBJ_FUNC_ARP(); + int rc; + char *backup_path = random_path(); + char *instance_name = random_name(); + char *server_path = random_path(); + init_fake_server(server_path); + + CatalogState *catalogState = catalog_new(backup_path); + catalogState->backup_location = drive; + rc = do_init(catalogState); + CU_ASSERT(rc == 0); + + //CU_ASSERT_FATAL(pio_exists_d(drive, backup_path)); + + init_config(&instance_config, instance_name); + instance_config.pgdata = server_path; + InstanceState *instanceState = makeInstanceState(catalogState, instance_name); + instanceState->database_location = drive; + rc = do_add_instance(instanceState, &instance_config); + CU_ASSERT(rc == 0); + + char buf[MAXPGPATH]; + snprintf(buf, MAXPGPATH, "%s/%s/%s", catalogState->backup_subdir_path, instance_name, BACKUP_CATALOG_CONF_FILE); + CU_ASSERT(pio_exists(drive, buf)); +} + + +PBK_test_description PIO_INIT_TESTS[] = { + {"Test do_init", test_do_init}, + {"Test do_add_instance", test_do_add_instance}, + {NULL,NULL}, +}; + +int +main(int argc, char *argv[]) +{ + ft_init_log(elog_ft_log); + fobj_init(); + FOBJ_FUNC_ARP(); + init_pio_objects(); + + init_test_drives(); + + if(CUE_SUCCESS != CU_initialize_registry()) + return CU_get_error(); + + pbk_add_tests(USE_LOCAL, "Local init", PIO_INIT_TESTS); + + CU_basic_set_mode(CU_BRM_VERBOSE); + + CU_basic_run_tests(); + CU_set_output_filename("test_probackup"); + //CU_list_tests_to_file(); + CU_automated_run_tests(); + + CU_cleanup_registry(); + + return CU_get_error(); +} diff --git a/unit/unit_pub.sh b/unit/unit_pub.sh new file mode 100644 index 000000000..6ba09e298 --- /dev/null +++ b/unit/unit_pub.sh @@ -0,0 +1,9 @@ +xsltproc test_pio-Results.xml > results-pio.html 2> /dev/null 
+xsltproc test_probackup-Results.xml > results-init.html 2> /dev/null +lcov -t "pb" --output pb.info --capture --directory . --directory ../s3 --directory ../src --rc lcov_branch_coverage=1 > /dev/null 2>&1 +genhtml --output report --branch-coverage pb.info > /dev/null 2>&1 +xdg-open report/index.html > /dev/null 2>&1 +xdg-open results-pio.html > /dev/null 2>&1 +if test -s results-init.html ; then + xdg-open results-init.html > /dev/null 2>&1 +fi From f61171c12ed9f10bc41723da7a2bb0718e4dbb52 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Mon, 19 Dec 2022 14:22:53 +0300 Subject: [PATCH 264/339] PBCKP-80 More unit test improvements --- unit/pgunit.c | 4 +- unit/pgunit.h | 2 +- unit/test_pio.c | 426 ++++++++++++++++++++++++++++++++---------- unit/test_probackup.c | 21 ++- 4 files changed, 337 insertions(+), 116 deletions(-) diff --git a/unit/pgunit.c b/unit/pgunit.c index 5daac7105..f326945bf 100644 --- a/unit/pgunit.c +++ b/unit/pgunit.c @@ -43,7 +43,7 @@ clean_basic_suite() #define FNAMES "abcdefghijklmnopqrstuvwxyz0123456789" static int rand_init=0; -char * +ft_str_t random_path(void) { char name[MAXPGPATH]; @@ -65,7 +65,7 @@ random_path(void) } name[i] = 0; - return strdup(name); + return ft_strdupc(name); } char * diff --git a/unit/pgunit.h b/unit/pgunit.h index fd86ca22d..2628900f8 100644 --- a/unit/pgunit.h +++ b/unit/pgunit.h @@ -17,7 +17,7 @@ extern int should_be_remote; void init_test_drives(void); int USE_LOCAL(void); -char *random_path(void); +ft_str_t random_path(void); char *random_name(void); void pbk_add_tests(int (*init)(void), const char *sub_name, PBK_test_description *tests); void pio_write(pioDrive_i drive, path_t name, const char *data); diff --git a/unit/test_pio.c b/unit/test_pio.c index 675af577c..c57475457 100644 --- a/unit/test_pio.c +++ b/unit/test_pio.c @@ -20,13 +20,13 @@ test_pioStat() { FOBJ_FUNC_ARP(); err_i err = $noerr(); - char *path = random_path(); + ft_str_t path = random_path(); - err = $i(pioWriteFile, drive, .path = 
path, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + err = $i(pioWriteFile, drive, .path = path.ptr, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); CU_ASSERT(!$haserr(err)); time_t now = time(NULL); - pio_stat_t pst = $i(pioStat, drive, .path = path, .follow_symlink = false, .err = &err); + pio_stat_t pst = $i(pioStat, drive, .path = path.ptr, .follow_symlink = false, .err = &err); CU_ASSERT(!$haserr(err)); @@ -34,6 +34,8 @@ test_pioStat() CU_ASSERT(pst.pst_mode == FILE_PERMISSION); CU_ASSERT(abs(now-pst.pst_mtime) < 2); CU_ASSERT(pst.pst_size == 5); + + ft_str_free(&path); } static void @@ -41,33 +43,17 @@ test_pioRemove() { FOBJ_FUNC_ARP(); - char *path = random_path(); - pio_write(drive, path, TEST_STR); - CU_ASSERT(pio_exists(drive, path)); + ft_str_t path = random_path(); + pio_write(drive, path.ptr, TEST_STR); + CU_ASSERT(pio_exists(drive, path.ptr)); - err_i err = $i(pioRemove, drive, .path = path, .missing_ok = false); + err_i err = $i(pioRemove, drive, .path = path.ptr, .missing_ok = false); CU_ASSERT(!$haserr(err)); - CU_ASSERT(!pio_exists(drive, path)); -} - -static void -test_pioRename() -{ - FOBJ_FUNC_ARP(); - - char *name = random_path(); - char *another_name = random_path(); + CU_ASSERT(!pio_exists(drive, path.ptr)); - pio_write(drive, name, TEST_STR); - CU_ASSERT(pio_exists(drive, name)); - - err_i err = $i(pioRename, dbdrive, .old_path = name, .new_path = another_name); - CU_ASSERT(!$haserr(err)); - - CU_ASSERT(!pio_exists(drive, name)); - CU_ASSERT(pio_exists(drive, another_name)); + ft_str_free(&path); } static void @@ -80,17 +66,20 @@ test_pioExists() CU_ASSERT(!$haserr(err)); CU_ASSERT(exists); - const char *path = random_path(); + ft_str_t path = random_path(); err = $noerr(); - exists = $i(pioExists, drive, .path = path, .expected_kind = PIO_KIND_REGULAR, &err); + exists = $i(pioExists, drive, .path = path.ptr, .expected_kind = PIO_KIND_REGULAR, &err); CU_ASSERT(!$haserr(err)); CU_ASSERT(!exists); - 
char *name = random_path(); - pio_write(drive, name, TEST_STR); - exists = $i(pioExists, drive, .path = name, .expected_kind = PIO_KIND_REGULAR, &err); + ft_str_t name = random_path(); + pio_write(drive, name.ptr, TEST_STR); + exists = $i(pioExists, drive, .path = name.ptr, .expected_kind = PIO_KIND_REGULAR, &err); CU_ASSERT(!$haserr(err)); CU_ASSERT(exists); + + ft_str_free(&path); + ft_str_free(&name); } static void @@ -111,23 +100,22 @@ test_pioWriteFile() FOBJ_FUNC_ARP(); err_i err = $noerr(); - char *path = random_path(); + ft_str_t path = random_path(); - CU_ASSERT(!pio_exists(drive, path)); + CU_ASSERT(!pio_exists(drive, path.ptr)); - err = $i(pioWriteFile, drive, .path = path, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + err = $i(pioWriteFile, drive, .path = path.ptr, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); CU_ASSERT(!$haserr(err)); - CU_ASSERT(pio_exists(drive, path)); + CU_ASSERT(pio_exists(drive, path.ptr)); - ft_bytes_t result = $i(pioReadFile, drive, .path = path, .binary = true, &err); + ft_bytes_t result = $i(pioReadFile, drive, .path = path.ptr, .binary = true, &err); CU_ASSERT(!$haserr(err)); CU_ASSERT(result.len==strlen(TEST_STR)); CU_ASSERT(!strncmp(result.ptr, TEST_STR, strlen(TEST_STR))); ft_bytes_free(&result); - - free(path); + ft_str_free(&path); } static void @@ -135,12 +123,12 @@ test_pioOpenRead() { FOBJ_FUNC_ARP(); err_i err = $noerr(); - char *path = random_path(); - pio_write(drive, path, TEST_STR); + ft_str_t path = random_path(); + pio_write(drive, path.ptr, TEST_STR); - CU_ASSERT(pio_exists(drive, path)); + CU_ASSERT(pio_exists(drive, path.ptr)); - pioReader_i reader = $i(pioOpenRead, drive, .path = path, &err); + pioReader_i reader = $i(pioOpenRead, drive, .path = path.ptr, &err); CU_ASSERT(!$haserr(err)); char B0[8192]; ft_bytes_t buf = ft_bytes(B0, 8192); @@ -159,24 +147,25 @@ test_pioOpenRead() //ft_bytes_free(&result); - free(path); + ft_str_free(&path); } static void 
test_pioOpenReadStream() { + // return enoent for non existent file. same for pioStat FOBJ_FUNC_ARP(); err_i err = $noerr(); - char *path = random_path(); + ft_str_t path = random_path(); pioReadStream_i stream; /* Crash in pioCloudDrive */ - stream = $i(pioOpenReadStream, drive, .path = path, &err); + stream = $i(pioOpenReadStream, drive, .path = path.ptr, &err); CU_ASSERT($haserr(err)); - pio_write(drive, path, TEST_STR); + pio_write(drive, path.ptr, TEST_STR); - stream = $i(pioOpenReadStream, drive, .path = path, &err); + stream = $i(pioOpenReadStream, drive, .path = path.ptr, &err); CU_ASSERT(!$haserr(err)); char B0[8192]; @@ -186,7 +175,7 @@ test_pioOpenReadStream() CU_ASSERT(ret==strlen(TEST_STR)); CU_ASSERT(!strncmp(buf.ptr, TEST_STR, strlen(TEST_STR))); $i(pioClose, stream); - free(path); + ft_str_free(&path); } static void @@ -194,16 +183,16 @@ test_pioGetCRC32() { FOBJ_FUNC_ARP(); err_i err = $noerr(); - char *path = random_path(); + ft_str_t path = random_path(); pg_crc32 crc; #if 0 //crashes. 
should return errno in err - crc = $i(pioGetCRC32, drive, .path = path, .compressed = false, .err = &err); + crc = $i(pioGetCRC32, drive, .path = path.ptr, .compressed = false, .err = &err); CU_ASSERT($haserr(err)); #endif - pio_write(drive, path, TEST_STR); - crc = $i(pioGetCRC32, drive, .path = path, .compressed = false, .err = &err); + pio_write(drive, path.ptr, TEST_STR); + crc = $i(pioGetCRC32, drive, .path = path.ptr, .compressed = false, .err = &err); CU_ASSERT(!$haserr(err)); CU_ASSERT(crc==0xFA94FDDF) } @@ -214,13 +203,13 @@ test_pioMakeDir() { FOBJ_FUNC_ARP(); - char *path = random_path(); + ft_str_t path = random_path(); - CU_ASSERT(!pio_exists(drive, path)); - err_i err = $i(pioMakeDir, drive, .path = path, .mode = DIR_PERMISSION, .strict = true); + CU_ASSERT(!pio_exists(drive, path.ptr)); + err_i err = $i(pioMakeDir, drive, .path = path.ptr, .mode = DIR_PERMISSION, .strict = true); CU_ASSERT(!$haserr(err)); - CU_ASSERT(pio_exists_d(drive, path)); + CU_ASSERT(pio_exists_d(drive, path.ptr)); } static void @@ -228,38 +217,214 @@ test_pioMakeDirWithParent() { FOBJ_FUNC_ARP(); char child[MAXPGPATH]; - char *parent = random_path(); - CU_ASSERT(!pio_exists(drive, parent)); - snprintf(child, MAXPGPATH, "%s/TEST", parent); + ft_str_t parent = random_path(); + CU_ASSERT(!pio_exists(drive, parent.ptr)); + snprintf(child, MAXPGPATH, "%s/TEST", parent.ptr); err_i err = $i(pioMakeDir, drive, .path = child, .mode = DIR_PERMISSION, .strict = true); CU_ASSERT(!$haserr(err)); - CU_ASSERT(pio_exists_d(drive, parent)); + CU_ASSERT(pio_exists_d(drive, parent.ptr)); CU_ASSERT(pio_exists_d(drive, child)); - free(parent); + ft_str_free(&parent); +} + +static void +test_pioListDirCanWithSlash() +{ + FOBJ_FUNC_ARP(); + err_i err = $noerr(); + ft_str_t root = random_path(); + ft_str_t slash = ft_asprintf("%s/", root.ptr); + ft_str_t child = ft_asprintf("%s/sample.txt", root.ptr); + + CU_ASSERT(!pio_exists(drive, root.ptr)); + err = $i(pioMakeDir, drive, .path = root.ptr, .mode = 
DIR_PERMISSION, .strict = true); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(pio_exists_d(drive, root.ptr)); + + err = $i(pioWriteFile, drive, .path = child.ptr, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + CU_ASSERT(!$haserr(err)); + + pioDirIter_i dir = $i(pioOpenDir, drive, .path = slash.ptr, .err = &err); + CU_ASSERT(!$haserr(err)); + + int count = 0; + while (true) + { + pio_dirent_t entry = $i(pioDirNext, dir, &err); + CU_ASSERT(!$haserr(err)); + if (entry.stat.pst_kind == PIO_KIND_UNKNOWN) break; + CU_ASSERT(ft_strcmp(entry.name, ft_cstr("sample.txt")) == FT_CMP_EQ); + count++; + } + CU_ASSERT(count == 1); + err = $i(pioClose, dir); + CU_ASSERT(!$haserr(err)); + + ft_str_free(&root); + ft_str_free(&slash); + ft_str_free(&child); +} + +static void +test_pioListDir() +{ + FOBJ_FUNC_ARP(); + ft_str_t root = random_path(); + ft_str_t child = ft_asprintf("%s/sample.txt", root.ptr); + ft_str_t sub_dir = ft_asprintf("%s/subdir", root.ptr); + ft_str_t sub_child = ft_asprintf("%s/subdir/xxx.txt", root.ptr); + err_i err = $noerr(); + int i; + + CU_ASSERT(!pio_exists(drive, root.ptr)); + err = $i(pioMakeDir, drive, .path = root.ptr, .mode = DIR_PERMISSION, .strict = true); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(pio_exists_d(drive, root.ptr)); + + err = $i(pioWriteFile, drive, .path = child.ptr, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + CU_ASSERT(!$haserr(err)); + + err = $i(pioMakeDir, drive, .path = sub_dir.ptr, .mode = DIR_PERMISSION, .strict = true); + CU_ASSERT(!$haserr(err)); + + err = $i(pioWriteFile, drive, .path = sub_child.ptr, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + CU_ASSERT(!$haserr(err)); + + pioDirIter_i dir = $i(pioOpenDir, drive, .path = root.ptr, .err = &err); + CU_ASSERT(!$haserr(err)); + +#define NUM_EXPECTED 2 + const char *expected[NUM_EXPECTED] = {"sample.txt", "subdir"}; + int count = 0; + for (count = 0; true; count++) + { + pio_dirent_t entry = $i(pioDirNext, dir, &err); 
+ CU_ASSERT(!$haserr(err)); + + if (entry.stat.pst_kind == PIO_KIND_UNKNOWN) break; + + for(i = 0; i < NUM_EXPECTED; ++i) + { + if(ft_strcmp(entry.name, ft_cstr(expected[i])) != FT_CMP_EQ) + continue; + expected[i] = NULL; + } + } + + for(i = 0; i < NUM_EXPECTED; ++i) + { + CU_ASSERT(expected[i] == NULL); + } + + CU_ASSERT(count == NUM_EXPECTED); + + err = $i(pioClose, dir); + CU_ASSERT(!$haserr(err)); + + dir = $i(pioOpenDir, drive, .path = sub_dir.ptr, .err = &err); + CU_ASSERT(!$haserr(err)); + + count = 0; + for (count = 0; true; count++) + { + pio_dirent_t entry = $i(pioDirNext, dir, &err); + CU_ASSERT(!$haserr(err)); + + if (entry.stat.pst_kind == PIO_KIND_UNKNOWN) break; + + CU_ASSERT(ft_strcmp(entry.name, ft_cstr("xxx.txt")) == FT_CMP_EQ); + } + + for(i = 0; i < NUM_EXPECTED; ++i) + { + CU_ASSERT(expected[i] == NULL); + } + + CU_ASSERT(count == 1); + + err = $i(pioClose, dir); + CU_ASSERT(!$haserr(err)); + +#undef NUM_EXPECTED +} + +static void +test_pioListDirMTimeAndSize() +{ + FOBJ_FUNC_ARP(); + ft_str_t root = random_path(); + ft_str_t child = ft_asprintf("%s/sample.txt", root.ptr); + err_i err = $noerr(); + int i; + + CU_ASSERT(!pio_exists(drive, root.ptr)); + err = $i(pioMakeDir, drive, .path = root.ptr, .mode = DIR_PERMISSION, .strict = true); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(pio_exists_d(drive, root.ptr)); + + err = $i(pioWriteFile, drive, .path = child.ptr, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + CU_ASSERT(!$haserr(err)); + time_t created = time(NULL); + + pioDirIter_i dir = $i(pioOpenDir, drive, .path = root.ptr, .err = &err); + CU_ASSERT(!$haserr(err)); + +#define NUM_EXPECTED 1 + const char *expected[NUM_EXPECTED] = {"sample.txt"}; + int count = 0; + for (count = 0; true; count++) + { + pio_dirent_t entry = $i(pioDirNext, dir, &err); + CU_ASSERT(!$haserr(err)); + + if (entry.stat.pst_kind == PIO_KIND_UNKNOWN) break; + + printf("XXX mtime=%ld, size=%ld created=%ld diff=%d\n", entry.stat.pst_mtime, 
entry.stat.pst_size, created, (int)created-(int)entry.stat.pst_mtime); + CU_ASSERT(entry.stat.pst_mtime == created); + //CU_ASSERT(entry.stat.pst_mtime == (created+3600*3)); + CU_ASSERT(entry.stat.pst_size == strlen(TEST_STR)); + + for(i = 0; i < NUM_EXPECTED; ++i) + { + if(ft_strcmp(entry.name, ft_cstr(expected[i])) != FT_CMP_EQ) + continue; + expected[i] = NULL; + } + } + + for(i = 0; i < NUM_EXPECTED; ++i) + { + CU_ASSERT(expected[i] == NULL); + } + + CU_ASSERT(count == NUM_EXPECTED); + + err = $i(pioClose, dir); + CU_ASSERT(!$haserr(err)); } static void test_pioRemoveDir() { FOBJ_FUNC_ARP(); - char *path = random_path(); + ft_str_t path = random_path(); err_i err = $noerr(); char path2[8192]; - snprintf(path2, 8192, "%s/%s", path, "sample.txt"); + snprintf(path2, 8192, "%s/%s", path.ptr, "sample.txt"); - CU_ASSERT(!pio_exists(drive, path)); - err = $i(pioMakeDir, drive, .path = path, .mode = DIR_PERMISSION, .strict = true); + CU_ASSERT(!pio_exists(drive, path.ptr)); + err = $i(pioMakeDir, drive, .path = path.ptr, .mode = DIR_PERMISSION, .strict = true); CU_ASSERT(!$haserr(err)); - CU_ASSERT(pio_exists_d(drive, path)); + CU_ASSERT(pio_exists_d(drive, path.ptr)); err = $i(pioWriteFile, drive, .path = path2, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); CU_ASSERT(!$haserr(err)); CU_ASSERT(pio_exists(drive, path2)); - $i(pioRemoveDir, drive, path, .root_as_well=false); + $i(pioRemoveDir, drive, path.ptr, .root_as_well=false); CU_ASSERT(!pio_exists(drive, path2)); - CU_ASSERT(pio_exists_d(drive, path)); + CU_ASSERT(pio_exists_d(drive, path.ptr)); } static void @@ -268,26 +433,26 @@ test_pioFilesAreSame() FOBJ_FUNC_ARP(); err_i err = $noerr(); - char *path1 = random_path(); - char *path2 = random_path(); + ft_str_t path1 = random_path(); + ft_str_t path2 = random_path(); - CU_ASSERT(!pio_exists(drive, path1)); - CU_ASSERT(!pio_exists(drive, path2)); + CU_ASSERT(!pio_exists(drive, path1.ptr)); + CU_ASSERT(!pio_exists(drive, path2.ptr)); - err = 
$i(pioWriteFile, drive, .path = path1, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + err = $i(pioWriteFile, drive, .path = path1.ptr, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); CU_ASSERT(!$haserr(err)); - CU_ASSERT(pio_exists(drive, path1)); + CU_ASSERT(pio_exists(drive, path1.ptr)); - err = $i(pioWriteFile, drive, .path = path2, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + err = $i(pioWriteFile, drive, .path = path2.ptr, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); CU_ASSERT(!$haserr(err)); - CU_ASSERT(pio_exists(drive, path2)); + CU_ASSERT(pio_exists(drive, path2.ptr)); - ft_bytes_t result1 = $i(pioReadFile, drive, .path = path1, .binary = true, &err); + ft_bytes_t result1 = $i(pioReadFile, drive, .path = path1.ptr, .binary = true, &err); CU_ASSERT(!$haserr(err)); CU_ASSERT(result1.len==strlen(TEST_STR)); CU_ASSERT(!strncmp(result1.ptr, TEST_STR, strlen(TEST_STR))); - ft_bytes_t result2 = $i(pioReadFile, drive, .path = path2, .binary = true, &err); + ft_bytes_t result2 = $i(pioReadFile, drive, .path = path2.ptr, .binary = true, &err); CU_ASSERT(!$haserr(err)); CU_ASSERT(result2.len==strlen(TEST_STR)); CU_ASSERT(!strncmp(result2.ptr, TEST_STR, strlen(TEST_STR))); @@ -298,8 +463,8 @@ test_pioFilesAreSame() ft_bytes_free(&result1); ft_bytes_free(&result2); - free(path1); - free(path2); + ft_str_free(&path1); + ft_str_free(&path2); } static void @@ -308,23 +473,23 @@ test_pioReadFile() FOBJ_FUNC_ARP(); err_i err = $noerr(); - char *path = random_path(); + ft_str_t path = random_path(); - CU_ASSERT(!pio_exists(drive, path)); + CU_ASSERT(!pio_exists(drive, path.ptr)); - err = $i(pioWriteFile, drive, .path = path, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); + err = $i(pioWriteFile, drive, .path = path.ptr, .content = ft_bytes(TEST_STR, strlen(TEST_STR)), .binary = true); CU_ASSERT(!$haserr(err)); - CU_ASSERT(pio_exists(drive, path)); + 
CU_ASSERT(pio_exists(drive, path.ptr)); - ft_bytes_t result = $i(pioReadFile, drive, .path = path, .binary = true, &err); + ft_bytes_t result = $i(pioReadFile, drive, .path = path.ptr, .binary = true, &err); CU_ASSERT(!$haserr(err)); CU_ASSERT(result.len==strlen(TEST_STR)); CU_ASSERT(!strncmp(result.ptr, TEST_STR, strlen(TEST_STR))); ft_bytes_free(&result); - free(path); + ft_str_free(&path); } static void @@ -332,12 +497,12 @@ test_pioOpenRewrite() { FOBJ_FUNC_ARP(); err_i err = $noerr(); - char *path = random_path(); - pio_write(drive, path, TEST_STR); + ft_str_t path = random_path(); + pio_write(drive, path.ptr, TEST_STR); - CU_ASSERT(pio_exists(drive, path)); + CU_ASSERT(pio_exists(drive, path.ptr)); - pioWriteCloser_i writer = $i(pioOpenRewrite, drive, .path = path, + pioWriteCloser_i writer = $i(pioOpenRewrite, drive, .path = path.ptr, .permissions = FILE_PERMISSION, .binary = true, .use_temp=true, .sync = true, .err = &err); CU_ASSERT(!$haserr(err)); @@ -348,34 +513,89 @@ test_pioOpenRewrite() CU_ASSERT(!$haserr(err)); $i(pioClose, writer); - ft_bytes_t result = $i(pioReadFile, drive, .path = path, .binary = true, &err); + ft_bytes_t result = $i(pioReadFile, drive, .path = path.ptr, .binary = true, &err); CU_ASSERT(strlen(XXX_STR) == result.len); CU_ASSERT(!memcmp(XXX_STR, result.ptr, result.len)); ft_bytes_free(&result); - free(path); + ft_str_free(&path); +} + +static void +test_pioSeek() +{ + FOBJ_FUNC_ARP(); + err_i err = $noerr(); + ft_str_t path = random_path(); + pioWriteCloser_i writer = $i(pioOpenRewrite, drive, .path = path.ptr, + .permissions = FILE_PERMISSION, .binary = true, + .use_temp=true, .sync = true, .err = &err); + CU_ASSERT(!$haserr(err)); + char B0[8192]; + snprintf(B0, 8192, "012345678901234567890123012345678901234567890123"); + ft_bytes_t buf = ft_bytes(B0, strlen(B0)); + err = $i(pioWrite, writer, .buf = buf); + CU_ASSERT(!$haserr(err)); + $i(pioClose, writer); + + pioReader_i reader = $i(pioOpenRead, drive, .path = path.ptr, &err); 
+ CU_ASSERT (!$haserr(err)); + +#define TRY_OFFT 1 +#define TRY_LEN 24 + err = $i(pioSeek, reader, TRY_OFFT); + CU_ASSERT(!$haserr(err)); + + ft_bytes_t read_buf = ft_bytes_alloc(TRY_LEN); + size_t rc = $i(pioRead, reader, .buf = read_buf, .err = &err); + CU_ASSERT(!$haserr(err)); + CU_ASSERT(rc == TRY_LEN); + CU_ASSERT(!memcmp(B0+TRY_OFFT, read_buf.ptr, TRY_LEN)); +} + +/* pioDBDrive */ +static void +test_pioRename() +{ + FOBJ_FUNC_ARP(); + pioDBDrive_i db_drive = pioDBDriveForLocation(FIO_LOCAL_HOST); + ft_str_t name = random_path(); + ft_str_t another_name = random_path(); + + pio_write(drive, name.ptr, TEST_STR); + CU_ASSERT(pio_exists(drive, name.ptr)); + + err_i err = $i(pioRename, db_drive, .old_path = name.ptr, .new_path = another_name.ptr); + CU_ASSERT(!$haserr(err)); + + CU_ASSERT(!pio_exists(drive, name.ptr)); + CU_ASSERT(pio_exists(drive, another_name.ptr)); } PBK_test_description PIO_DRIVE_TESTS[] = { - {"Test pioOpenRead", test_pioOpenRead}, + {"Test pioOpenRead", test_pioOpenRead}, {"Test pioOpenReadStream", test_pioOpenReadStream}, - {"Test pioStat", test_pioStat}, - {"Test pioRemove", test_pioRemove}, - {"Test pioRename", test_pioRename}, - {"Test pioExists", test_pioExists}, - {"Test pioGetCRC32", test_pioGetCRC32}, - {"Test pioIsRemote", test_pioIsRemote}, - {"Test pioMakeDir", test_pioMakeDir}, + {"Test pioStat", test_pioStat}, + {"Test pioRemove", test_pioRemove}, + {"Test pioExists", test_pioExists}, + {"Test pioGetCRC32", test_pioGetCRC32}, + {"Test pioIsRemote", test_pioIsRemote}, + {"Test pioMakeDir", test_pioMakeDir}, {"Test pioMakeDirWithParent", test_pioMakeDirWithParent}, - {"Test pioRemoveDir", test_pioRemoveDir}, + {"Test pioListDir", test_pioListDir}, + {"Test pioListDirCanWithSlash", test_pioListDirCanWithSlash}, + {"Test pioListDirMTimeAndSize", test_pioListDirMTimeAndSize}, + {"Test pioRemoveDir", test_pioRemoveDir}, {"Test pioFilesAreSame", test_pioFilesAreSame}, - {"Test pioReadFile", test_pioReadFile}, - {"Test pioWriteFile", 
test_pioWriteFile}, - {"Test pioOpenRewrite", test_pioOpenRewrite}, + {"Test pioReadFile", test_pioReadFile}, + {"Test pioWriteFile", test_pioWriteFile}, + {"Test pioOpenRewrite", test_pioOpenRewrite}, + {"Test pioSeek", test_pioSeek}, {NULL, NULL} }; PBK_test_description PIO_DB_DRIVE_TESTS[] = { + {"Test pioRename", test_pioRename}, {NULL, NULL} }; diff --git a/unit/test_probackup.c b/unit/test_probackup.c index e16d80aac..0312fb7d4 100644 --- a/unit/test_probackup.c +++ b/unit/test_probackup.c @@ -14,33 +14,34 @@ static void test_do_init() { FOBJ_FUNC_ARP(); - char *backup_path = random_path(); + ft_str_t backup_path = random_path(); + CatalogState *catalogState = catalog_new(backup_path.ptr); + int rc; - CatalogState *catalogState = catalog_new(backup_path); + rc = do_init(catalogState); - int rc = do_init(catalogState); CU_ASSERT(rc == 0); } static void test_do_add_instance() { - //FOBJ_FUNC_ARP(); + FOBJ_FUNC_ARP(); int rc; - char *backup_path = random_path(); + ft_str_t backup_path = random_path(); char *instance_name = random_name(); - char *server_path = random_path(); - init_fake_server(server_path); + ft_str_t server_path = random_path(); + init_fake_server(server_path.ptr); - CatalogState *catalogState = catalog_new(backup_path); + CatalogState *catalogState = catalog_new(backup_path.ptr); catalogState->backup_location = drive; rc = do_init(catalogState); CU_ASSERT(rc == 0); - //CU_ASSERT_FATAL(pio_exists_d(drive, backup_path)); + //CU_ASSERT(pio_exists_d(drive, backup_path.ptr)); init_config(&instance_config, instance_name); - instance_config.pgdata = server_path; + instance_config.pgdata = server_path.ptr; InstanceState *instanceState = makeInstanceState(catalogState, instance_name); instanceState->database_location = drive; rc = do_add_instance(instanceState, &instance_config); From 1a2cd718406a7e74ccce45355c08df11f4b0b5f7 Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Wed, 21 Dec 2022 13:06:08 +0300 Subject: [PATCH 265/339] PBCKP-80 cleanup 
test --- unit/test_pio.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/unit/test_pio.c b/unit/test_pio.c index c57475457..d4cb40841 100644 --- a/unit/test_pio.c +++ b/unit/test_pio.c @@ -381,9 +381,7 @@ test_pioListDirMTimeAndSize() if (entry.stat.pst_kind == PIO_KIND_UNKNOWN) break; - printf("XXX mtime=%ld, size=%ld created=%ld diff=%d\n", entry.stat.pst_mtime, entry.stat.pst_size, created, (int)created-(int)entry.stat.pst_mtime); CU_ASSERT(entry.stat.pst_mtime == created); - //CU_ASSERT(entry.stat.pst_mtime == (created+3600*3)); CU_ASSERT(entry.stat.pst_size == strlen(TEST_STR)); for(i = 0; i < NUM_EXPECTED; ++i) From efb5cf7793f997c2fff64a1803265c8691e4dbcf Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 21 Dec 2022 23:16:14 +0300 Subject: [PATCH 266/339] ... --- unit/test_probackup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unit/test_probackup.c b/unit/test_probackup.c index 0312fb7d4..f3e2503e2 100644 --- a/unit/test_probackup.c +++ b/unit/test_probackup.c @@ -43,7 +43,7 @@ test_do_add_instance() init_config(&instance_config, instance_name); instance_config.pgdata = server_path.ptr; InstanceState *instanceState = makeInstanceState(catalogState, instance_name); - instanceState->database_location = drive; + instanceState->database_location = $reduce(pioDrive, dbdrive); rc = do_add_instance(instanceState, &instance_config); CU_ASSERT(rc == 0); From 54602f1a55a7529210c0473b509c3a9e4f1431dd Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 00:43:51 +0300 Subject: [PATCH 267/339] set LC_TIME to C as well private branch uses strptime to parse "always C locale" time. 
--- src/show.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/src/show.c b/src/show.c index 81ac86f68..71ca0a0a8 100644 --- a/src/show.c +++ b/src/show.c @@ -71,7 +71,8 @@ static PQExpBufferData show_buf; static bool first_instance = true; static int32 json_level = 0; -static const char* lc_env_locale; +static const char* lc_env_locale_numeric; +static const char* lc_env_locale_time; typedef enum { LOCALE_C, // Used for formatting output to unify the dot-based floating point representation LOCALE_ENV // Default environment locale @@ -81,11 +82,14 @@ typedef enum { static locale_t env_locale, c_locale; #endif void memorize_environment_locale() { - lc_env_locale = (const char *)getenv("LC_NUMERIC"); - lc_env_locale = lc_env_locale != NULL ? lc_env_locale : "C"; + lc_env_locale_numeric = (const char *)getenv("LC_NUMERIC"); + lc_env_locale_numeric = lc_env_locale_numeric != NULL ? lc_env_locale_numeric : "C"; + lc_env_locale_time = (const char *)getenv("LC_TIME"); + lc_env_locale_time = lc_env_locale_time != NULL ? lc_env_locale_time : "C"; #ifdef HAVE_USELOCALE - env_locale = newlocale(LC_NUMERIC_MASK, lc_env_locale, (locale_t)0); - c_locale = newlocale(LC_NUMERIC_MASK, "C", (locale_t)0); + env_locale = newlocale(LC_NUMERIC_MASK, lc_env_locale_numeric, (locale_t)0); + env_locale = newlocale(LC_TIME_MASK, lc_env_locale_time, env_locale); + c_locale = newlocale(LC_NUMERIC_MASK|LC_TIME_MASK, "C", (locale_t)0); #else #ifdef HAVE__CONFIGTHREADLOCALE _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); @@ -104,7 +108,8 @@ static void set_output_numeric_locale(output_numeric_locale loc) { #ifdef HAVE_USELOCALE uselocale(loc == LOCALE_C ? c_locale : env_locale); #else - setlocale(LC_NUMERIC, loc == LOCALE_C ? "C" : lc_env_locale); + setlocale(LC_NUMERIC, loc == LOCALE_C ? "C" : lc_env_locale_numeric); + setlocale(LC_TIME, loc == LOCALE_C ? 
"C" : lc_env_locale_time); #endif } From d15d34bdb5e6fbc540a129f18e511b2a901bc750 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 01:16:30 +0300 Subject: [PATCH 268/339] fix setting LC_TIME --- src/pg_probackup.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 5c55bd9dd..09cff6df9 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -317,6 +317,7 @@ pbk_main(int argc, char *argv[]) // Setting C locale for numeric values in order to impose dot-based floating-point representation memorize_environment_locale(); setlocale(LC_NUMERIC, "C"); + setlocale(LC_TIME, "C"); /* Get current time */ current_time = time(NULL); From f8d010b21db15e27d6199f794c383b49be63f26c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 01:36:43 +0300 Subject: [PATCH 269/339] rename pio_recursive_dir_t -> pioRecursiveDir pioRecursiveDir is really an object, so pio_recursive_dir_free were quite misleading. --- src/dir.c | 12 ++++++------ src/utils/file.c | 20 ++++++++++---------- src/utils/file.h | 10 +++++----- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/dir.c b/src/dir.c index e628e4d45..c450dac3b 100644 --- a/src/dir.c +++ b/src/dir.c @@ -541,13 +541,13 @@ db_list_dir(parray *files, const char* root, FOBJ_FUNC_ARP(); pio_dirent_t dent; pioDrive_i drive; - pio_recursive_dir_t* dir; + pioRecursiveDir* dir; err_i err; drive = pioDriveForLocation(location); /* Open directory and list contents */ - dir = pio_recursive_dir_alloc(drive, root, &err); + dir = pioRecursiveDir_alloc(drive, root, &err); if (dir == NULL) { if (getErrno(err) == ENOENT && external_dir_num == 0) @@ -564,7 +564,7 @@ db_list_dir(parray *files, const char* root, ft_logerr(FT_FATAL, $errmsg(err), "Listing directory"); } - while ((dent = pio_recursive_dir_next(dir, &err)).stat.pst_kind) + while ((dent = pioRecursiveDir_next(dir, &err)).stat.pst_kind) { pgFile *file; char child[MAXPGPATH]; @@ -586,14 +586,14 @@ 
db_list_dir(parray *files, const char* root, { /* Skip */ if (file->kind == PIO_KIND_DIRECTORY) - pio_recursive_dir_dont_recurse_current(dir); + pioRecursiveDir_dont_recurse_current(dir); pgFileFree(file); continue; } else if (check_res == CHECK_EXCLUDE_FALSE) { ft_assert(file->kind == PIO_KIND_DIRECTORY); - pio_recursive_dir_dont_recurse_current(dir); + pioRecursiveDir_dont_recurse_current(dir); /* We add the directory itself which content was excluded */ parray_append(files, file); continue; @@ -603,7 +603,7 @@ db_list_dir(parray *files, const char* root, parray_append(files, file); } - pio_recursive_dir_free(dir); + pioRecursiveDir_close(dir); if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Listing directory \"%s\"", root); } diff --git a/src/utils/file.c b/src/utils/file.c index 8946731dd..cf01e6e36 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1814,8 +1814,8 @@ typedef struct pio_recursive_dir { #define kls__pioRecursiveDir mth(fobjDispose) fobj_klass(pioRecursiveDir); -pio_recursive_dir_t* -pio_recursive_dir_alloc(pioDrive_i drive, path_t root, err_i *err) +pioRecursiveDir* +pioRecursiveDir_alloc(pioDrive_i drive, path_t root, err_i *err) { pioDirIter_i iter; fobj_reset_err(err); @@ -1833,7 +1833,7 @@ pio_recursive_dir_alloc(pioDrive_i drive, path_t root, err_i *err) } static pio_dirent_t -pio_recursive_dir_next_impl(pio_recursive_dir_t* self, err_i* err) +pio_recursive_dir_next_impl(pioRecursiveDir* self, err_i* err) { if (self->dirent.stat.pst_kind == PIO_KIND_DIRECTORY && !self->dont_recurse_current) @@ -1891,22 +1891,22 @@ pio_recursive_dir_next_impl(pio_recursive_dir_t* self, err_i* err) } pio_dirent_t -pio_recursive_dir_next(pio_recursive_dir_t* self, err_i* err) +pioRecursiveDir_next(pioRecursiveDir* dir, err_i* err) { FOBJ_FUNC_ARP(); pio_dirent_t ent; fobj_reset_err(err); - ent = pio_recursive_dir_next_impl(self, err); + ent = pio_recursive_dir_next_impl(dir, err); $iresult(*err); return ent; } void 
-pio_recursive_dir_dont_recurse_current(pio_recursive_dir_t* self) +pioRecursiveDir_dont_recurse_current(pioRecursiveDir* dir) { - ft_assert(self->dirent.stat.pst_kind == PIO_KIND_DIRECTORY); - self->dont_recurse_current = true; + ft_assert(dir->dirent.stat.pst_kind == PIO_KIND_DIRECTORY); + dir->dont_recurse_current = true; } static void @@ -1924,11 +1924,11 @@ pioRecursiveDir_fobjDispose(VSelf) } void -pio_recursive_dir_free(pio_recursive_dir_t* self) +pioRecursiveDir_close(pioRecursiveDir* dir) { /* we are releasing bound resources, * but self will be dealloced in FOBJ's ARP */ - pioRecursiveDir_fobjDispose(self); + pioRecursiveDir_fobjDispose(dir); } fobj_klass_handle(pioRecursiveDir); diff --git a/src/utils/file.h b/src/utils/file.h index 8ba3d235c..9a062934a 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -430,11 +430,11 @@ extern void init_pio_line_reader(pio_line_reader *r, pioRead_i source, size_t ma extern void deinit_pio_line_reader(pio_line_reader *r); extern ft_bytes_t pio_line_reader_getline(pio_line_reader *r, err_i *err); -typedef struct pio_recursive_dir pio_recursive_dir_t; -extern pio_recursive_dir_t* pio_recursive_dir_alloc(pioDrive_i drive, path_t root, err_i *err); -extern pio_dirent_t pio_recursive_dir_next(pio_recursive_dir_t* dir, err_i* err); -extern void pio_recursive_dir_dont_recurse_current(pio_recursive_dir_t* dir); -extern void pio_recursive_dir_free(pio_recursive_dir_t* dir); +typedef struct pio_recursive_dir pioRecursiveDir; +extern pioRecursiveDir* pioRecursiveDir_alloc(pioDrive_i drive, path_t root, err_i *err); +extern pio_dirent_t pioRecursiveDir_next(pioRecursiveDir* dir, err_i* err); +extern void pioRecursiveDir_dont_recurse_current(pioRecursiveDir* dir); +extern void pioRecursiveDir_close(pioRecursiveDir* dir); /* append path component */ extern bool ft_strbuf_cat_path(ft_strbuf_t *buf, ft_str_t path); From eb5ee8988000c0e461041db0f6401672b8f59293 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 
03:24:53 +0300 Subject: [PATCH 270/339] pioSyncTree - sync whole subtree --- src/pg_probackup.h | 4 ++ src/utils/file.c | 124 +++++++++++++++++++++++++++++++++++++++++++++ src/utils/file.h | 7 ++- 3 files changed, 134 insertions(+), 1 deletion(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index a0b34e870..7c12cc0b8 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1220,4 +1220,8 @@ extern int64 calculate_datasize_of_filelist(parray *filelist); #define FT_SLICE_TYPE char* #include +#define FT_SLICE str +#define FT_SLICE_TYPE ft_str_t +#include + #endif /* PG_PROBACKUP_H */ diff --git a/src/utils/file.c b/src/utils/file.c index cf01e6e36..cedc52902 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1759,6 +1759,20 @@ fio_communicate(int in, int out) } break; } + case PIO_SYNC_TREE: + { + err = $i(pioSyncTree, drive, buf); + if ($haserr(err)) + fio_send_pio_err(out, err); + else + { + hdr.size = 0; + hdr.arg = 0; + + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + } + break; + } case PIO_CLOSE: { ft_assert(hdr.handle >= 0); @@ -2714,6 +2728,98 @@ pioLocalDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) return $iresult(err); } +static err_i +pioLocalDrive_pioSyncTree(VSelf, path_t root) +{ + FOBJ_FUNC_ARP(); + Self(pioLocalDrive); + pioRecursiveDir* walker; + err_i err; + pio_dirent_t dirent; + ft_strbuf_t pathbuf = ft_strbuf_zero(); + ft_arr_str_t dirs = ft_arr_init(); + ft_str_t dir; + int fd; + + walker = pioRecursiveDir_alloc($bind(pioDrive, self), root, &err); + if ($haserr(err)) + return $iresult($err(RT, "pioSyncTree: {cause}", cause(err.self))); + + while ((dirent = pioRecursiveDir_next(walker, &err)).stat.pst_kind) + { + if (dirent.stat.pst_kind == PIO_KIND_DIRECTORY) + { + ft_arr_str_push(&dirs, ft_strdup(dirent.name)); + continue; + } + ft_assert(dirent.stat.pst_kind == PIO_KIND_REGULAR); + + ft_strbuf_reset_for_reuse(&pathbuf); + ft_strbuf_catc(&pathbuf, root); + ft_strbuf_cat_path(&pathbuf, 
dirent.name); + + /* TODO: use fsync_fname_compat when it will return err_i */ + fd = open(pathbuf.ptr, O_RDWR, PG_BINARY); + if (fd < 0) + { + if (errno == EACCES) + continue; + err = $syserr(errno, "Couldn't open for sync {path:q}", path(pathbuf.ptr)); + goto cleanup; + } + if (fsync(fd) < 0) + { + err = $syserr(errno, "Couldn't fsync {path:q}", path(pathbuf.ptr)); + close(fd); + goto cleanup; + } + (void)close(fd); + } + pioRecursiveDir_close(walker); + if ($haserr(err)) + goto cleanup; + + /* in reverse order therefore innermost directories first */ + while (dirs.len > 0) + { + dir = ft_arr_str_pop(&dirs); + + ft_strbuf_reset_for_reuse(&pathbuf); + ft_strbuf_catc(&pathbuf, root); + ft_strbuf_cat_path(&pathbuf, dir); + + ft_str_free(&dir); + + /* TODO: use fsync_fname_compat when it will return err_i */ + fd = open(pathbuf.ptr, O_RDONLY, PG_BINARY); + if (fd < 0) + { + if (errno == EACCES || errno == EISDIR) + continue; + err = $syserr(errno, "Couldn't open for sync {path:q}", path(pathbuf.ptr)); + goto cleanup; + } + if (fsync(fd) < 0 && !(errno == EBADF || errno == EINVAL)) + { + err = $syserr(errno, "Couldn't fsync {path:q}", path(pathbuf.ptr)); + close(fd); + goto cleanup; + } + (void)close(fd); + } + +cleanup: + while (dirs.len > 0) + { + dir = ft_arr_str_pop(&dirs); + ft_str_free(&dir); + } + ft_strbuf_free(&pathbuf); + ft_arr_str_free(&dirs); + + return $iresult(err); +} + /* LOCAL FILE */ static void pioLocalFile_fobjDispose(VSelf) @@ -3429,6 +3535,24 @@ pioRemoteDrive_pioWriteFile(VSelf, path_t path, ft_bytes_t content, bool binary) return $noerr(); } +static err_i +pioRemoteDrive_pioSyncTree(VSelf, path_t root) +{ + Self(pioRemoteDrive); + fio_header hdr = { + .cop = PIO_SYNC_TREE, + .size = strlen(root)+1, + }; + IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, root, hdr.size), hdr.size); + + IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + if (hdr.cop == FIO_PIO_ERROR) + 
return fio_receive_pio_err(&hdr); + ft_assert(hdr.cop == PIO_SYNC_TREE); + return $noerr(); +} + /* REMOTE FILE */ static err_i diff --git a/src/utils/file.h b/src/utils/file.h index 9a062934a..cfc2ff8c9 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -68,6 +68,7 @@ typedef enum PIO_DIR_OPEN, PIO_DIR_NEXT, PIO_IS_DIR_EMPTY, + PIO_SYNC_TREE, PIO_CLOSE, PIO_DISPOSE, } fio_operations; @@ -285,6 +286,9 @@ fobj_iface(pioPagesIterator); #define mth__pioWriteFile err_i, (path_t, path), (ft_bytes_t, content), \ (bool, binary, true) +/* Sync whole directories tree starting with root */ +#define mth__pioSyncTree err_i, (path_t, root) + #define mth__pioIteratePages pioPagesIterator_i, (path_t, path), \ (int, segno), (datapagemap_t, pagemap), (XLogRecPtr, start_lsn), \ (CompressAlg, calg), (int, clevel), \ @@ -308,6 +312,7 @@ fobj_method(pioRemoveDir); fobj_method(pioReadFile); fobj_method(pioWriteFile); fobj_method(pioIteratePages); +fobj_method(pioSyncTree); #define iface__pioDrive mth(pioOpenRead, pioOpenReadStream), \ mth(pioStat, pioRemove), \ @@ -318,7 +323,7 @@ fobj_method(pioIteratePages); fobj_iface(pioDrive); #define iface__pioDBDrive iface__pioDrive, mth(pioIteratePages, pioOpenWrite), \ - mth(pioRename) + mth(pioRename, pioSyncTree) fobj_iface(pioDBDrive); extern pioDrive_i pioDriveForLocation(fio_location location); From dfade6a5d5bcb48bdecab3027511317e33112da9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 03:38:45 +0300 Subject: [PATCH 271/339] use pioSyncTree in do_backup_pg and remove sync parameter from pioOpenRewrite in send_pages --- src/backup.c | 40 ++++++++++++++-------------------------- src/data.c | 2 +- 2 files changed, 15 insertions(+), 27 deletions(-) diff --git a/src/backup.c b/src/backup.c index ebeeed9a0..ea4327d66 100644 --- a/src/backup.c +++ b/src/backup.c @@ -106,6 +106,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, time_t start_time, end_time; char pretty_time[20]; char pretty_bytes[20]; + + 
pioSyncTree_i syncer; err_i err = $noerr(); @@ -554,37 +556,23 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, /* Sync all copied files unless '--no-sync' flag is used */ if (no_sync) elog(WARNING, "Backup files are not synced to disk"); - else + else if ($implements(pioSyncTree, current.backup_location.self, &syncer)) { + char external_dst[MAXPGPATH]; elog(INFO, "Syncing backup files to disk"); time(&start_time); - for (i = 0; i < parray_num(backup_files_list); i++) - { - char to_fullpath[MAXPGPATH]; - pgFile *file = (pgFile *) parray_get(backup_files_list, i); - - /* TODO: sync directory ? */ - if (file->kind == PIO_KIND_DIRECTORY) - continue; - - if (file->write_size <= 0) - continue; - - /* construct fullpath */ - if (file->external_dir_num == 0) - join_path_components(to_fullpath, current.database_dir, file->rel_path); - else - { - char external_dst[MAXPGPATH]; - - makeExternalDirPathByNum(external_dst, external_prefix, - file->external_dir_num); - join_path_components(to_fullpath, external_dst, file->rel_path); - } + err = $i(pioSyncTree, syncer, current.database_dir); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Syncing backup's database_dir"); - if (fio_sync(FIO_BACKUP_HOST, to_fullpath) != 0) - elog(ERROR, "Cannot sync file \"%s\": %s", to_fullpath, strerror(errno)); + for (i = 1; i <= parray_num(external_dirs); i++) + { + makeExternalDirPathByNum(external_dst, external_prefix, i); + err = $i(pioSyncTree, syncer, external_dst); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), + "Syncing backup's external dir %d", i); } time(&end_time); diff --git a/src/data.c b/src/data.c index 22f2133ca..ae70536b5 100644 --- a/src/data.c +++ b/src/data.c @@ -1769,7 +1769,7 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, if($isNULL(out)) { out = $i(pioOpenRewrite, backup_location, to_fullpath, - .use_temp = false, .sync = true, .err = &err); + .use_temp = false, .err = &err); if ($haserr(err)) return 
$iresult(err); crc32 = pioCRC32Counter_alloc(); From ffdd4b3cf7420af05234f0728a1efd58878d116a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 04:03:00 +0300 Subject: [PATCH 272/339] use pioSyncTree in catchup_sync_destination_files --- src/catchup.c | 32 +++++--------------------------- 1 file changed, 5 insertions(+), 27 deletions(-) diff --git a/src/catchup.c b/src/catchup.c index 1c62027a9..3106b7fe9 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -532,39 +532,17 @@ catchup_multithreaded_copy(int num_threads, static void catchup_sync_destination_files(const char* pgdata_path, fio_location location, parray *filelist, pgFile *pg_control_file) { - char fullpath[MAXPGPATH]; time_t start_time, end_time; char pretty_time[20]; - int i; + pioDBDrive_i drive = pioDBDriveForLocation(location); + err_i err; elog(INFO, "Syncing copied files to disk"); time(&start_time); - for (i = 0; i < parray_num(filelist); i++) - { - pgFile *file = (pgFile *) parray_get(filelist, i); - - /* TODO: sync directory ? 
- * - at first glance we can rely on fs journaling, - * which is enabled by default on most platforms - * - but PG itself is not relying on fs, its durable_sync - * includes directory sync - */ - if (file->kind == PIO_KIND_DIRECTORY || file->excluded) - continue; - - Assert(file->external_dir_num == 0); - join_path_components(fullpath, pgdata_path, file->rel_path); - if (fio_sync(location, fullpath) != 0) - elog(ERROR, "Cannot sync file \"%s\": %s", fullpath, strerror(errno)); - } - - /* - * sync pg_control file - */ - join_path_components(fullpath, pgdata_path, pg_control_file->rel_path); - if (fio_sync(location, fullpath) != 0) - elog(ERROR, "Cannot sync file \"%s\": %s", fullpath, strerror(errno)); + err = $i(pioSyncTree, drive, pgdata_path); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Syncing files"); time(&end_time); pretty_time_interval(difftime(end_time, start_time), From d69902ddced51f9137bd7d819b22e2624b04f74c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 04:22:00 +0300 Subject: [PATCH 273/339] merge.c: use temp files and rename from pioOpenRewrite though we had to add sync arguments trough backup_data_file and backup_non_data_file down to send_pages and send_file. 
--- src/backup.c | 5 +++-- src/catchup.c | 3 ++- src/data.c | 28 +++++++++++++++------------- src/merge.c | 32 +++----------------------------- src/pg_probackup.h | 4 ++-- 5 files changed, 25 insertions(+), 47 deletions(-) diff --git a/src/backup.c b/src/backup.c index ea4327d66..7901484aa 100644 --- a/src/backup.c +++ b/src/backup.c @@ -2035,13 +2035,14 @@ backup_files(void *arg) instance_config.compress_alg, instance_config.compress_level, arguments->nodeInfo->checksum_version, - arguments->hdr_map, false); + arguments->hdr_map, false, false); } else { backup_non_data_file(db_drive, backup_drive, file, prev_file, from_fullpath, to_fullpath, - current.backup_mode, current.parent_backup, true); + current.backup_mode, current.parent_backup, + true, false); } if (file->write_size == FILE_NOT_FOUND) diff --git a/src/catchup.c b/src/catchup.c index 3106b7fe9..b72462935 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -434,7 +434,8 @@ catchup_thread_runner(void *arg) { backup_non_data_file(drive_from, drive_to, file, dest_file, from_fullpath, to_fullpath, - arguments->backup_mode, current.parent_backup, true); + arguments->backup_mode, current.parent_backup, + true, false); } /* file went missing during catchup */ diff --git a/src/data.c b/src/data.c index ae70536b5..1569589d8 100644 --- a/src/data.c +++ b/src/data.c @@ -60,7 +60,7 @@ typedef struct backup_page_iterator { static err_i send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, uint32 checksum_version, - BackupPageHeader2 **headers, BackupMode backup_mode); + BackupPageHeader2 **headers, BackupMode backup_mode, bool sync); static err_i copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr sync_lsn, uint32 checksum_version, @@ -74,10 +74,11 @@ static size_t restore_data_file_internal(pioReader_i in, pioDBWriter_i out, pgFi static void backup_non_data_file_internal(pioDrive_i drive_from, 
pioDrive_i drive_to, const char *from_fullpath, const char *to_fullpath, pgFile *file, - bool missing_ok); + bool missing_ok, bool sync); static err_i send_file(pioDrive_i drive_from, pioDrive_i drive_to, - const char *to_fullpath, const char *from_path, bool cut_zero_tail, pgFile *file); + const char *to_fullpath, const char *from_path, + bool cut_zero_tail, pgFile *file, bool sync); #ifdef HAVE_LIBZ /* Implementation of zlib compression method */ @@ -381,7 +382,7 @@ void backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, CompressAlg calg, int clevel, uint32 checksum_version, - HeaderMap *hdr_map, bool is_merge) + HeaderMap *hdr_map, bool is_merge, bool sync) { /* page headers */ BackupPageHeader2 *headers = NULL; @@ -438,7 +439,7 @@ backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpat /* TODO: stop handling errors internally */ err = send_pages(to_fullpath, from_fullpath, file, start_lsn, calg, clevel, checksum_version, - &headers, backup_mode); + &headers, backup_mode, sync); if ($haserr(err)) { @@ -553,7 +554,7 @@ backup_non_data_file(pioDrive_i drive_from, pioDrive_i drive_to, pgFile *file, pgFile *prev_file, const char *from_fullpath, const char *to_fullpath, BackupMode backup_mode, time_t parent_backup_time, - bool missing_ok) + bool missing_ok, bool sync) { FOBJ_FUNC_ARP(); err_i err; @@ -599,7 +600,7 @@ backup_non_data_file(pioDrive_i drive_from, pioDrive_i drive_to, } backup_non_data_file_internal(drive_from, drive_to, from_fullpath, - to_fullpath, file, missing_ok); + to_fullpath, file, missing_ok, sync); } /* @@ -1175,7 +1176,7 @@ void backup_non_data_file_internal(pioDrive_i drive_from, pioDrive_i drive_to, const char *from_fullpath, const char *to_fullpath, pgFile *file, - bool missing_ok) + bool missing_ok, bool sync) { bool cut_zero_tail; err_i err; @@ -1190,7 +1191,8 @@ backup_non_data_file_internal(pioDrive_i drive_from, pioDrive_i 
drive_to, file->uncompressed_size = 0; /* backup non-data file */ - err = send_file(drive_from, drive_to, to_fullpath, from_fullpath, cut_zero_tail, file); + err = send_file(drive_from, drive_to, to_fullpath, from_fullpath, + cut_zero_tail, file, sync); /* handle errors */ if($haserr(err)) { @@ -1208,7 +1210,7 @@ backup_non_data_file_internal(pioDrive_i drive_from, pioDrive_i drive_to, } static err_i -send_file(pioDrive_i db_drive, pioDrive_i backup_drive, const char *to_fullpath, const char *from_fullpath, bool cut_zero_tail, pgFile *file) { +send_file(pioDrive_i db_drive, pioDrive_i backup_drive, const char *to_fullpath, const char *from_fullpath, bool cut_zero_tail, pgFile *file, bool sync) { FOBJ_FUNC_ARP(); err_i err = $noerr(); pioReadStream_i in; @@ -1216,7 +1218,7 @@ send_file(pioDrive_i db_drive, pioDrive_i backup_drive, const char *to_fullpath, /* open to_fullpath */ out = $i(pioOpenRewrite, backup_drive, .path = to_fullpath, - .permissions = file->mode, .err = &err); + .permissions = file->mode, .sync = sync, .err = &err); if($haserr(err)) elog(ERROR, "Cannot open destination file \"%s\": %s", @@ -1737,7 +1739,7 @@ static err_i send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel, uint32 checksum_version, BackupPageHeader2 **headers, - BackupMode backup_mode) + BackupMode backup_mode, bool sync) { FOBJ_FUNC_ARP(); pioDrive_i backup_location = pioDriveForLocation(FIO_BACKUP_HOST); @@ -1769,7 +1771,7 @@ send_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, if($isNULL(out)) { out = $i(pioOpenRewrite, backup_location, to_fullpath, - .use_temp = false, .err = &err); + .sync = sync, .err = &err); if ($haserr(err)) return $iresult(err); crc32 = pioCRC32Counter_alloc(); diff --git a/src/merge.c b/src/merge.c index 72131981b..9b727941f 100644 --- a/src/merge.c +++ b/src/merge.c @@ -1180,7 +1180,6 @@ merge_data_file(parray *parent_chain, pgBackup 
*full_backup, { char to_fullpath[MAXPGPATH]; char to_fullpath_tmp1[MAXPGPATH]; /* used for restore */ - char to_fullpath_tmp2[MAXPGPATH]; /* used for backup */ pioDBDrive_i drive = pioDBDriveForLocation(FIO_BACKUP_HOST); pioDBWriter_i out; err_i err; @@ -1193,7 +1192,6 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, /* set fullpath of destination file and temp files */ join_path_components(to_fullpath, full_database_dir, tmp_file->rel_path); snprintf(to_fullpath_tmp1, MAXPGPATH, "%s_tmp1", to_fullpath); - snprintf(to_fullpath_tmp2, MAXPGPATH, "%s_tmp2", to_fullpath); /* open temp file */ out = $i(pioOpenWrite, drive, to_fullpath_tmp1, .err = &err); @@ -1218,11 +1216,11 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, * 2 backups of old versions, where n_blocks is missing. */ - backup_data_file(tmp_file, to_fullpath_tmp1, to_fullpath_tmp2, + backup_data_file(tmp_file, to_fullpath_tmp1, to_fullpath, InvalidXLogRecPtr, BACKUP_MODE_FULL, dest_backup->compress_alg, dest_backup->compress_level, dest_backup->checksum_version, - &(full_backup->hdr_map), true); + &(full_backup->hdr_map), true, !no_sync); /* drop restored temp file */ if (unlink(to_fullpath_tmp1) == -1) @@ -1244,16 +1242,6 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, if (tmp_file->write_size == 0) return; - /* sync second temp file to disk */ - if (!no_sync && fio_sync(FIO_BACKUP_HOST, to_fullpath_tmp2) != 0) - elog(ERROR, "Cannot sync merge temp file \"%s\": %s", - to_fullpath_tmp2, strerror(errno)); - - /* Do atomic rename from second temp file to destination file */ - if (rename(to_fullpath_tmp2, to_fullpath) == -1) - elog(ERROR, "Could not rename file \"%s\" to \"%s\": %s", - to_fullpath_tmp2, to_fullpath, strerror(errno)); - /* drop temp file */ unlink(to_fullpath_tmp1); } @@ -1271,7 +1259,6 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup, { int i; char to_fullpath[MAXPGPATH]; - char to_fullpath_tmp[MAXPGPATH]; /* used for backup */ 
char from_fullpath[MAXPGPATH]; pgBackup *from_backup = NULL; pgFile *from_file = NULL; @@ -1287,8 +1274,6 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup, else join_path_components(to_fullpath, full_database_dir, dest_file->rel_path); - snprintf(to_fullpath_tmp, MAXPGPATH, "%s_tmp", to_fullpath); - /* * Iterate over parent chain starting from direct parent of destination * backup to oldest backup in chain, and look for the first @@ -1349,18 +1334,7 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup, /* Copy file to FULL backup directory into temp file */ backup_non_data_file(full_backup->backup_location, dest_backup->backup_location, tmp_file, NULL, from_fullpath, - to_fullpath_tmp, BACKUP_MODE_FULL, 0, false); - - /* sync temp file to disk */ - if (!no_sync && fio_sync(FIO_BACKUP_HOST, to_fullpath_tmp) != 0) - elog(ERROR, "Cannot sync merge temp file \"%s\": %s", - to_fullpath_tmp, strerror(errno)); - - /* Do atomic rename from second temp file to destination file */ - if (rename(to_fullpath_tmp, to_fullpath) == -1) - elog(ERROR, "Could not rename file \"%s\" to \"%s\": %s", - to_fullpath_tmp, to_fullpath, strerror(errno)); - + to_fullpath, BACKUP_MODE_FULL, 0, false, !no_sync); } /* diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 7c12cc0b8..3865867b5 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1036,12 +1036,12 @@ extern void catchup_data_file(pgFile *file, const char *from_fullpath, const cha extern void backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath, XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode, CompressAlg calg, int clevel, uint32 checksum_version, - HeaderMap *hdr_map, bool missing_ok); + HeaderMap *hdr_map, bool missing_ok, bool sync); extern void backup_non_data_file(pioDrive_i from, pioDrive_i to, pgFile *file, pgFile *prev_file, const char *from_fullpath, const char *to_fullpath, BackupMode backup_mode, time_t parent_backup_time, - bool 
missing_ok); + bool missing_ok, bool sync); extern size_t restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, const char *to_fullpath, bool use_bitmap, PageState *checksum_map, From 90cd7250a23afc5bc5cbce9e71c456bf7c4e9adc Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 04:30:00 +0300 Subject: [PATCH 274/339] use pioSyncTree in restore_chain --- src/restore.c | 40 ++++++++++------------------------------ 1 file changed, 10 insertions(+), 30 deletions(-) diff --git a/src/restore.c b/src/restore.c index 8734d785a..9c951a10d 100644 --- a/src/restore.c +++ b/src/restore.c @@ -1051,40 +1051,20 @@ restore_chain(InstanceState *instanceState, elog(WARNING, "Restored files are not synced to disk"); else { + pioDBDrive_i dbdrive = $bind(pioDBDrive, instanceState->database_location.self); elog(INFO, "Syncing restored files to disk"); time(&start_time); - for (i = 0; i < parray_num(dest_files); i++) - { - char to_fullpath[MAXPGPATH]; - pgFile *dest_file = (pgFile *) parray_get(dest_files, i); - - if (dest_file->kind == PIO_KIND_DIRECTORY) - continue; - - /* skip external files if ordered to do so */ - if (dest_file->external_dir_num > 0 && - params->skip_external_dirs) - continue; - - /* construct fullpath */ - if (dest_file->external_dir_num == 0) - { - if (strcmp(PG_TABLESPACE_MAP_FILE, dest_file->rel_path) == 0) - continue; - if (strcmp(DATABASE_MAP, dest_file->rel_path) == 0) - continue; - join_path_components(to_fullpath, pgdata_path, dest_file->rel_path); - } - else - { - char *external_path = parray_get(external_dirs, dest_file->external_dir_num - 1); - join_path_components(to_fullpath, external_path, dest_file->rel_path); - } + err = $i(pioSyncTree, dbdrive, .root = pgdata_path); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Syncing pgdata"); - /* TODO: write test for case: file to be synced is missing */ - if (fio_sync(FIO_DB_HOST, to_fullpath) != 0) - elog(ERROR, "Failed to sync file \"%s\": %s", to_fullpath, 
strerror(errno)); + for (i = 0; i < parray_num(external_dirs); i++) + { + char *external_path = parray_get(external_dirs, i); + err = $i(pioSyncTree, dbdrive, .root = external_path); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Syncin external dir %d", i); } time(&end_time); From 1da2b77f9277d491e7d8a1f833f82be5bb9534c8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 04:58:58 +0300 Subject: [PATCH 275/339] remove fio_sync --- src/utils/file.c | 60 ------------------------------------------------ src/utils/file.h | 3 --- 2 files changed, 63 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index cedc52902..107815bc1 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -632,49 +632,6 @@ fio_rename_impl(char const* old_path, const char* new_path, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); } -/* Sync file to disk */ -int -fio_sync(fio_location location, const char* path) -{ - if (fio_is_remote(location)) - { - fio_header hdr; - size_t path_len = strlen(path) + 1; - hdr.cop = FIO_SYNC; - hdr.handle = -1; - hdr.size = path_len; - - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(fio_stdout, path, path_len), path_len); - IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); - - if (hdr.arg != 0) - { - errno = hdr.arg; - return -1; - } - - return 0; - } - else - { - int fd; - - fd = open(path, O_WRONLY | PG_BINARY, FILE_PERMISSION); - if (fd < 0) - return -1; - - if (fsync(fd) < 0) - { - close(fd); - return -1; - } - close(fd); - - return 0; - } -} - enum { GET_CRC32_DECOMPRESS = 1, GET_CRC32_TRUNCATED = 2 @@ -1343,7 +1300,6 @@ fio_communicate(int in, int out) ft_str_t path; ft_str_t path2; int rc; - int tmp_fd; pg_crc32 crc; err_i err = $noerr(); @@ -1482,22 +1438,6 @@ fio_communicate(int in, int out) case FIO_SEND_FILE_CONTENT: fio_send_file_content_impl(fd[hdr.handle], out, buf); break; - case FIO_SYNC: - /* open file and fsync it */ - 
tmp_fd = open(buf, O_WRONLY | PG_BINARY, FILE_PERMISSION); - if (tmp_fd < 0) - hdr.arg = errno; - else - { - if (fsync(tmp_fd) == 0) - hdr.arg = 0; - else - hdr.arg = errno; - } - close(tmp_fd); - - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - break; case PIO_GET_CRC32: crc = $i(pioGetCRC32, drive, .path = buf, .compressed = (hdr.arg & GET_CRC32_DECOMPRESS) != 0, diff --git a/src/utils/file.h b/src/utils/file.h index cfc2ff8c9..1ef6d6e29 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -27,7 +27,6 @@ typedef enum FIO_OPEN, FIO_CLOSE, FIO_WRITE, - FIO_SYNC, FIO_RENAME, FIO_SYMLINK, FIO_REMOVE, @@ -156,8 +155,6 @@ extern void fio_error(int rc, int size, const char* file, int line); extern void fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size); /* pathname-style functions */ -extern int fio_sync(fio_location location, const char* path); - extern int fio_symlink(fio_location location, const char* target, const char* link_path, bool overwrite); extern int fio_remove(fio_location location, const char* path, bool missing_ok); extern ssize_t fio_readlink(fio_location location, const char *path, char *value, size_t valsiz); From dfc7f0466b05ea95f36d193721cb3023835bde1c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 05:35:04 +0300 Subject: [PATCH 276/339] fix show command: use pioOpenDir in catalog_get_instance_list --- src/catalog.c | 46 ++++++++++++++++------------------------------ 1 file changed, 16 insertions(+), 30 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 8701ce210..deeadb55d 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -841,56 +841,42 @@ catalog_new(const char *backup_path) parray * catalog_get_instance_list(CatalogState *catalogState) { - DIR *dir; - struct dirent *dent; + FOBJ_FUNC_ARP(); + pioDirIter_i data_dir; + pio_dirent_t ent; + err_i err = $noerr(); parray *instances; instances = parray_new(); /* open directory and list contents */ - dir = 
opendir(catalogState->backup_subdir_path); - if (dir == NULL) - elog(ERROR, "Cannot open directory \"%s\": %s", - catalogState->backup_subdir_path, strerror(errno)); + data_dir = $i(pioOpenDir, catalogState->backup_location, + catalogState->backup_subdir_path, .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Failed to get backup list"); - while (errno = 0, (dent = readdir(dir)) != NULL) + while ((ent = $i(pioDirNext, data_dir, .err=&err)).stat.pst_kind) { - char child[MAXPGPATH]; - struct stat st; InstanceState *instanceState = NULL; - /* skip entries point current dir or parent dir */ - if (strcmp(dent->d_name, ".") == 0 || - strcmp(dent->d_name, "..") == 0) + if (ent.stat.pst_kind != PIO_KIND_DIRECTORY) continue; - join_path_components(child, catalogState->backup_subdir_path, dent->d_name); - - if (lstat(child, &st) == -1) - elog(ERROR, "Cannot stat file \"%s\": %s", - child, strerror(errno)); - - if (!S_ISDIR(st.st_mode)) - continue; - - instanceState = makeInstanceState(catalogState, dent->d_name); + instanceState = makeInstanceState(catalogState, ent.name.ptr); instanceState->config = readInstanceConfigFile(instanceState); parray_append(instances, instanceState); } + $i(pioClose, data_dir); // ignore error + + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Read backup root directory"); + /* TODO 3.0: switch to ERROR */ if (parray_num(instances) == 0) elog(WARNING, "This backup catalog contains no backup instances. 
Backup instance can be added via 'add-instance' command."); - if (errno) - elog(ERROR, "Cannot read directory \"%s\": %s", - catalogState->backup_subdir_path, strerror(errno)); - - if (closedir(dir)) - elog(ERROR, "Cannot close directory \"%s\": %s", - catalogState->backup_subdir_path, strerror(errno)); - return instances; } From 67b406cc2b2db39902415a7e38f98fc38e73aa75 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 06:20:50 +0300 Subject: [PATCH 277/339] adapt do_validate_all to pio: use pioOpenDir --- src/validate.c | 43 +++++++++++++++++-------------------------- 1 file changed, 17 insertions(+), 26 deletions(-) diff --git a/src/validate.c b/src/validate.c index d3a822194..f10029696 100644 --- a/src/validate.c +++ b/src/validate.c @@ -414,6 +414,7 @@ pgBackupValidateFiles(void *arg) int do_validate_all(CatalogState *catalogState, InstanceState *instanceState) { + FOBJ_FUNC_ARP(); corrupted_backup_found = false; skipped_due_to_lock = false; err_i err; @@ -421,40 +422,24 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) if (instanceState == NULL) { /* Show list of instances */ - DIR *dir; - struct dirent *dent; + pioDirIter_i data_dir; + pio_dirent_t ent; /* open directory and list contents */ - dir = opendir(catalogState->backup_subdir_path); - if (dir == NULL) - elog(ERROR, "cannot open directory \"%s\": %s", catalogState->backup_subdir_path, strerror(errno)); + data_dir = $i(pioOpenDir, catalogState->backup_location, + catalogState->backup_subdir_path, .err = &err); + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Failed to get backup list"); - errno = 0; - while ((dent = readdir(dir))) + while ((ent = $i(pioDirNext, data_dir, .err=&err)).stat.pst_kind) { FOBJ_LOOP_ARP(); - char child[MAXPGPATH]; - struct stat st; - InstanceState *instanceState; - - - /* skip entries point current dir or parent dir */ - if (strcmp(dent->d_name, ".") == 0 || - strcmp(dent->d_name, "..") == 0) - continue; + InstanceState 
*instanceState = NULL; - join_path_components(child, catalogState->backup_subdir_path, dent->d_name); - - if (lstat(child, &st) == -1) - elog(ERROR, "cannot stat file \"%s\": %s", child, strerror(errno)); - - if (!S_ISDIR(st.st_mode)) + if (ent.stat.pst_kind != PIO_KIND_DIRECTORY) continue; - /* - * Initialize instance configuration. - */ - instanceState = makeInstanceState(catalogState, dent->d_name); + instanceState = makeInstanceState(catalogState, ent.name.ptr); if (config_read_opt(catalogState->backup_location, instanceState->instance_config_path, instance_options, ERROR, false, &err) == 0) @@ -469,6 +454,12 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) do_validate_instance(instanceState); pgut_free(instanceState); } + + $i(pioClose, data_dir); // ignore error + + if ($haserr(err)) + ft_logerr(FT_FATAL, $errmsg(err), "Read backup root directory"); + } else { From c6132c1f8a16e9ff3547207d01a979ddd567349d Mon Sep 17 00:00:00 2001 From: Sergey Fukanchik Date: Thu, 22 Dec 2022 12:22:31 +0300 Subject: [PATCH 278/339] PBCKP-80 free memory --- src/configure.c | 2 ++ src/data.c | 3 +-- src/dir.c | 5 +++++ src/validate.c | 1 + 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/configure.c b/src/configure.c index d338d5ea1..5d328ab20 100644 --- a/src/configure.c +++ b/src/configure.c @@ -359,6 +359,8 @@ do_set_config(InstanceState *instanceState) if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Writting configuration file"); + + ft_strbuf_free(&buf); } void diff --git a/src/data.c b/src/data.c index 1569589d8..715ab5dab 100644 --- a/src/data.c +++ b/src/data.c @@ -1426,7 +1426,6 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, FOBJ_FUNC_ARP(); bool is_valid = true; pg_crc32 crc; - BackupPageHeader2 *headers = NULL; pioDrive_i drive; err_i err; @@ -1564,7 +1563,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, is_valid = false; } - pg_free(headers); + 
pg_free(iter.headers); return is_valid; } diff --git a/src/dir.c b/src/dir.c index c450dac3b..69e423563 100644 --- a/src/dir.c +++ b/src/dir.c @@ -996,6 +996,11 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pg if (tablespace_dirs.head != NULL) elog(ERROR, "Backup %s has no tablespaceses, nothing to remap " "via \"--tablespace-mapping\" option", backup_id_of(backup)); + + free(tmp_file); + parray_walk(links, pgFileFree); + parray_free(links); + return NoTblspc; } diff --git a/src/validate.c b/src/validate.c index f10029696..d7fde27c0 100644 --- a/src/validate.c +++ b/src/validate.c @@ -745,6 +745,7 @@ validate_tablespace_map(pgBackup *backup, bool no_validate) if (!tablespace_map) { elog(LOG, "there is no file tablespace_map"); + pgFileFree(dummy); parray_walk(files, pgFileFree); parray_free(files); return false; From bc7a88b92f8d16f72307d3adb7e466e7b7b9d1f8 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 15:29:20 +0300 Subject: [PATCH 279/339] ability to alloc completely custom error --- src/fu_util/fo_obj.h | 6 +++++- src/fu_util/impl/fo_impl.c | 39 +++++++++++++++++++++++++++++++++++++ src/fu_util/impl/fo_impl2.h | 11 ++++++++++- 3 files changed, 54 insertions(+), 2 deletions(-) diff --git a/src/fu_util/fo_obj.h b/src/fu_util/fo_obj.h index f843820f4..49a6c6999 100644 --- a/src/fu_util/fo_obj.h +++ b/src/fu_util/fo_obj.h @@ -564,6 +564,10 @@ fobj_error_cstr_key(__msgSuffix); * $haserr(err) - true if $notNULL(err) */ #define $haserr(err) $notNULL(err) +/* + * $err_has_kind(kind, err) + */ +#define $err_has_kind(kind, err) fobj_err_has_kind(kind, err) /* * $syserr(errno) @@ -586,7 +590,7 @@ ft_inline const char* getErrnoStr(err_i err); /* * Get error type */ -#define $errtype(err) fobj_errtype(err) +#define $errkind(err) fobj_errkind(err) /* * Get error message diff --git a/src/fu_util/impl/fo_impl.c b/src/fu_util/impl/fo_impl.c index c6446e91d..7e87c9f95 100644 --- a/src/fu_util/impl/fo_impl.c +++ 
b/src/fu_util/impl/fo_impl.c @@ -1132,6 +1132,39 @@ fobj__make_err(const char *type, return bind_err(err); } +err_i +fobj__alloc_err(const char *type, + ft_source_position_t src, + const char *msg, + fobj_err_kv_t *kvs, + size_t kvn) { + fobjErr* err; + fobj_err_kv_t* kv; + + src.func = ft_cstrdup(src.func); + src.file = ft_cstrdup(src.file); + err = fobj_alloc_sized(fobjErr, + ft_mul_size(sizeof(*kvs), kvn+1), + .type = ft_cstrdup(type), + .message = ft_cstrdup(msg), + .src = src, + .free_type_and_src = true, + ); + memcpy(err->kv, kvs, sizeof(*kvs)*kvn); + /* search for suffix */ + for (kv = err->kv; kv->key; kv++) { + switch (ft_arg_type(kv->val)) { + case 'o': + $ref(ft_arg_o(kv->val)); + break; + case 's': + kv->val.v.s = kv->val.v.s ? ft_cstrdup(kv->val.v.s) : NULL; + break; + } + } + return bind_err(err); +} + static void fobjErr__fobjErr_marker_DONT_IMPLEMENT_ME(VSelf) { } @@ -1150,6 +1183,12 @@ fobjErr_fobjDispose(VSelf) { break; } } + if (self->free_type_and_src) + { + ft_free((void*)self->type); + ft_free((void*)self->src.file); + ft_free((void*)self->src.func); + } ft_free((void*)self->message); $del(&self->sibling); } diff --git a/src/fu_util/impl/fo_impl2.h b/src/fu_util/impl/fo_impl2.h index a9495e3ca..16e0a1921 100644 --- a/src/fu_util/impl/fo_impl2.h +++ b/src/fu_util/impl/fo_impl2.h @@ -124,6 +124,7 @@ struct fobjErr { const char* message; ft_source_position_t src; fobjErr* sibling; /* sibling error */ + bool free_type_and_src; fobj_err_kv_t kv[]; }; @@ -176,6 +177,14 @@ extern err_i fobj__make_err(const char *type, const char *msg, fobj_err_kv_t *kvs, size_t kvn); +extern err_i fobj__alloc_err(const char *type, + ft_source_position_t src, + const char *msg, + fobj_err_kv_t *kvs, + size_t kvn); + +#define fobj_err_has_kind(kind, err) \ + (!$noerr(err) && strcmp(fobj_error_kind_##kind(), $errkind(err)) == 0) #define fobj__err_transform_kv_do(v) \ fobj__err_mkkv_##v @@ -196,7 +205,7 @@ getErrnoStr(err_i err) { } ft_inline const char* 
-fobj_errtype(err_i err) { +fobj_errkind(err_i err) { fobjErr* self = (fobjErr*)(err.self); ft_assert(fobj_real_klass_of(self) == fobjErr__kh()); \ return self->type ? self->type : "RT"; From a10d19a64093835a9cfbb7f35526c87e407f1b41 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 15:30:31 +0300 Subject: [PATCH 280/339] more complete send of error through remote connection --- src/utils/file.c | 60 ++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 48 insertions(+), 12 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 107815bc1..0a3b99058 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -746,28 +746,64 @@ fio_mkdir_impl(const char* path, int mode, bool strict, int out) static void fio_send_pio_err(int out, err_i err) { - const char *err_msg = $errmsg(err); - fio_header hdr = {.cop = FIO_PIO_ERROR, .size = strlen(err_msg) + 1, .arg = getErrno(err)}; + ft_strbuf_t load = ft_strbuf_zero(); + ft_source_position_t src = $errsrc(err); + fio_header hdr = { + .cop = FIO_PIO_ERROR, + .arg = (getErrno(err) & 0xff) | (src.line << 8), + }; + + + ft_strbuf_catc_zt(&load, $errkind(err)); + ft_strbuf_catc_zt(&load, $errmsg(err)); + ft_strbuf_catc_zt(&load, src.file); + ft_strbuf_catc_zt(&load, src.func); + + hdr.size = load.len; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(out, err_msg, hdr.size), hdr.size); + IO_CHECK(fio_write_all(out, load.ptr, load.len), load.len); - /* We also need to send source location and all the KVs */ + /* We also need to send all the KVs */ + ft_strbuf_free(&load); } static err_i fio_receive_pio_err(fio_header *hdr) { - int pio_errno = hdr->arg; - char *err_msg = pg_malloc(hdr->size); - - IO_CHECK(fio_read_all(fio_stdin, err_msg, hdr->size), hdr->size); + int pio_errno = hdr->arg & 0xff; + ft_bytes_t load = ft_bytes_alloc(hdr->size); + ft_bytes_t parse; + ft_strbuf_t rmsg = ft_strbuf_init_str(ft_cstr("(remote) ")); + ft_str_t kind; + ft_str_t msg; + 
ft_str_t file; + int line = hdr->arg >> 8; + ft_str_t func; + fobj_err_kv_t kvs[] = {{.key="errNo", .val=ft_mka_i(pio_errno)}}; + err_i err; - if (pio_errno) - return $err(SysErr, "(remote) {causeStr}", - causeStr(err_msg), errNo(pio_errno)); + IO_CHECK(fio_read_all(fio_stdin, load.ptr, load.len), load.len); + parse = load; + kind = ft_bytes_shift_zt(&parse); + msg = ft_bytes_shift_zt(&parse); + file = ft_bytes_shift_zt(&parse); + func = ft_bytes_shift_zt(&parse); + ft_strbuf_cat(&rmsg, msg); + + err = fobj__alloc_err(kind.ptr, + (ft_source_position_t){ + .file = file.ptr, + .line = line, + .func = func.ptr, + }, + rmsg.ptr, + kvs, pio_errno ? 1 : 0); + + ft_bytes_free(&load); + ft_strbuf_free(&rmsg); - return $err(RT, "(remote) {causeStr}", causeStr(err_msg)); + return err; } static void From 86609c2d30fa0a2c61be5dc96264079703ed0cbb Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 15:31:00 +0300 Subject: [PATCH 281/339] archive.c: allow to overwrite broken gz file --- src/archive.c | 19 +++++++++++++++---- tests/archive_test.py | 37 +++++++++++++++++++++++++------------ 2 files changed, 40 insertions(+), 16 deletions(-) diff --git a/src/archive.c b/src/archive.c index f958c1903..560308a8f 100644 --- a/src/archive.c +++ b/src/archive.c @@ -420,10 +420,21 @@ push_file_internal(const char *wal_file_name, const char *pg_xlog_dir, crc32_dst = $i(pioGetCRC32, backup_drive, to_fullpath, .compressed = is_compress, .err = &err); - if ($haserr(err)) - return $iresult(err); - - if (crc32_src == crc32_dst) + if ($err_has_kind(GZ, err) && overwrite) + { + elog(LOG, "WAL file already exists and looks like it is damaged, overwriting: %s", + $errmsg(err)); + } + else if ($err_has_kind(GZ, err)) + { + return $iresult($err(RT, "WAL file already exists and looks like it is damaged: {cause}", + cause(err.self))); + } + else if ($haserr(err)) + { + return $iresult(err); + } + else if (crc32_src == crc32_dst) { elog(LOG, "WAL file already exists in archive with the 
same " "checksum, skip pushing: \"%s\"", from_fullpath); diff --git a/tests/archive_test.py b/tests/archive_test.py index af7bef721..4e9cc90f2 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -387,9 +387,14 @@ def test_archive_push_file_exists(self): 'pg_probackup archive-push WAL file', log_content) - self.assertIn( - 'WAL file already exists in archive with different checksum', - log_content) + if self.archive_compress: + self.assertIn( + 'WAL file already exists and looks like it is damaged', + log_content) + else: + self.assertIn( + 'WAL file already exists in archive with different checksum', + log_content) self.assertNotIn( 'pg_probackup archive-push completed successfully', log_content) @@ -466,12 +471,15 @@ def test_archive_push_file_exists_overwrite(self): 'DETAIL: The failed archive command was:', log_content) self.assertIn( 'pg_probackup archive-push WAL file', log_content) - self.assertNotIn( - 'WAL file already exists in archive with ' - 'different checksum, overwriting', log_content) - self.assertIn( - 'WAL file already exists in archive with ' - 'different checksum', log_content) + self.assertNotIn('overwriting', log_content) + if self.archive_compress: + self.assertIn( + 'WAL file already exists and looks like ' + 'it is damaged', log_content) + else: + self.assertIn( + 'WAL file already exists in archive with ' + 'different checksum', log_content) self.assertNotIn( 'pg_probackup archive-push completed successfully', log_content) @@ -487,9 +495,14 @@ def test_archive_push_file_exists_overwrite(self): 'pg_probackup archive-push completed successfully' in log_content, 'Expecting messages about successfull execution archive_command') - self.assertIn( - 'WAL file already exists in archive with ' - 'different checksum, overwriting', log_content) + if self.archive_compress: + self.assertIn( + 'WAL file already exists and looks like ' + 'it is damaged, overwriting', log_content) + else: + self.assertIn( + 'WAL file already exists in 
archive with ' + 'different checksum, overwriting', log_content) @unittest.skip("should be redone with file locking") def test_archive_push_part_file_exists_not_stale(self): From 7b375352ee99ce196ae3700685327ccceb9c8aef Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 17:34:05 +0300 Subject: [PATCH 282/339] improvement to ptrack_helpers.GDBobj autoclose it __del__ --- tests/helpers/ptrack_helpers.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 2a9526eaf..936b55458 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1961,6 +1961,7 @@ class GDBobj: def __init__(self, cmd, env, attach=False): self.verbose = env.verbose self.output = '' + self._did_quit = False # Check gdb flag is set up if not env.gdb: @@ -2019,6 +2020,13 @@ def __init__(self, cmd, env, attach=False): else: break + def __del__(self): + if not self._did_quit: + try: + self.quit() + except subprocess.TimeoutExpired: + self.kill() + def get_line(self): line = self.proc.stdout.readline() self.output += line @@ -2026,7 +2034,9 @@ def get_line(self): def kill(self): self.proc.kill() - self.proc.wait() + self.proc.wait(3) + self.proc.stdin.close() + self.proc.stdout.close() def set_breakpoint(self, location): @@ -2154,7 +2164,12 @@ def stopped_in_breakpoint(self): return False def quit(self): - self.proc.terminate() + if not self._did_quit: + self._did_quit = True + self.proc.terminate() + self.proc.wait(3) + self.proc.stdin.close() + self.proc.stdout.close() # use for breakpoint, run, continue def _execute(self, cmd, running=True): From da99c0258265acbfd1d8b4ca096d9b5c33db5479 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 17:35:15 +0300 Subject: [PATCH 283/339] speedup pgpro434_3 and pgpro434_4 a bit --- tests/archive_test.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git 
a/tests/archive_test.py b/tests/archive_test.py index 4e9cc90f2..b26f58909 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -226,7 +226,7 @@ def test_pgpro434_3(self): gdb = self.backup_node( backup_dir, 'node', node, options=[ - "--archive-timeout=60", + "--archive-timeout=10", "--log-level-file=LOG"], gdb=True) @@ -237,6 +237,8 @@ def test_pgpro434_3(self): self.set_auto_conf(node, {'archive_command': 'exit 1'}) node.reload() + sleep(1) + gdb.continue_execution_until_exit() sleep(1) @@ -246,7 +248,7 @@ def test_pgpro434_3(self): log_content = f.read() self.assertIn( - "ERROR: WAL segment 000000010000000000000003 could not be archived in 60 seconds", + "ERROR: WAL segment 000000010000000000000003 could not be archived in 10 seconds", log_content) log_file = os.path.join(node.logs_dir, 'postgresql.log') @@ -281,7 +283,7 @@ def test_pgpro434_4(self): gdb = self.backup_node( backup_dir, 'node', node, options=[ - "--archive-timeout=60", + "--archive-timeout=10", "--log-level-file=info"], gdb=True) @@ -310,7 +312,6 @@ def test_pgpro434_4(self): postgres_gdb.continue_execution_until_running() gdb.continue_execution_until_exit() - # gdb._execute('detach') log_file = os.path.join(backup_dir, 'log', 'pg_probackup.log') with open(log_file, 'r') as f: @@ -318,11 +319,11 @@ def test_pgpro434_4(self): if self.get_version(node) < 150000: self.assertIn( - "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", + "ERROR: pg_stop_backup doesn't answer in 10 seconds, cancel it", log_content) else: self.assertIn( - "ERROR: pg_backup_stop doesn't answer in 60 seconds, cancel it", + "ERROR: pg_backup_stop doesn't answer in 10 seconds, cancel it", log_content) log_file = os.path.join(node.logs_dir, 'postgresql.log') From dd20305c5a342f99ef371733f7e8fa3cd4cd4c2f Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 22 Dec 2022 20:07:43 +0300 Subject: [PATCH 284/339] [PBCKP-426] rename pioLocalFile->pioLocalReadFile just for symmetry with pioLocalWriteFile --- 
src/utils/file.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 0a3b99058..425b0ecf9 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1944,13 +1944,13 @@ typedef struct pioFile #define kls__pioFile mth(fobjDispose) fobj_klass(pioFile); -typedef struct pioLocalFile +typedef struct pioLocalReadFile { pioFile p; int fd; -} pioLocalFile; -#define kls__pioLocalFile iface__pioReader, iface(pioReader, pioReadStream) -fobj_klass(pioLocalFile); +} pioLocalReadFile; +#define kls__pioLocalReadFile iface__pioReader, iface(pioReader, pioReadStream) +fobj_klass(pioLocalReadFile); typedef struct pioLocalWriteFile { @@ -2167,7 +2167,7 @@ pioLocalDrive_pioOpenRead(VSelf, path_t path, err_i *err) return (pioReader_i){NULL}; } - file = $alloc(pioLocalFile, .fd = fd, + file = $alloc(pioLocalReadFile, .fd = fd, .p = { .path = ft_cstrdup(path) } ); return $bind(pioReader, file); } @@ -2798,9 +2798,9 @@ pioLocalDrive_pioSyncTree(VSelf, path_t root) /* LOCAL FILE */ static void -pioLocalFile_fobjDispose(VSelf) +pioLocalReadFile_fobjDispose(VSelf) { - Self(pioLocalFile); + Self(pioLocalReadFile); if (!self->p.closed) { close(self->fd); @@ -2810,9 +2810,9 @@ pioLocalFile_fobjDispose(VSelf) } static err_i -pioLocalFile_pioClose(VSelf) +pioLocalReadFile_pioClose(VSelf) { - Self(pioLocalFile); + Self(pioLocalReadFile); err_i err = $noerr(); int r; @@ -2828,9 +2828,9 @@ pioLocalFile_pioClose(VSelf) } static size_t -pioLocalFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) +pioLocalReadFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) { - Self(pioLocalFile); + Self(pioLocalReadFile); ssize_t r; fobj_reset_err(err); @@ -2847,9 +2847,9 @@ pioLocalFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) } static err_i -pioLocalFile_pioSeek(VSelf, uint64_t offs) +pioLocalReadFile_pioSeek(VSelf, uint64_t offs) { - Self(pioLocalFile); + Self(pioLocalReadFile); ft_assert(self->fd >= 0, "Closed file abused 
\"%s\"", self->p.path); @@ -2862,10 +2862,10 @@ pioLocalFile_pioSeek(VSelf, uint64_t offs) } static fobjStr* -pioLocalFile_fobjRepr(VSelf) +pioLocalReadFile_fobjRepr(VSelf) { - Self(pioLocalFile); - return $fmt("pioLocalFile({path:q}, fd:{fd}", + Self(pioLocalReadFile); + return $fmt("pioLocalReadFile({path:q}, fd:{fd}", (path, $S(self->p.path)), (fd, $I(self->fd))); } @@ -5625,7 +5625,7 @@ fobj_klass_handle(pioRemotePagesIterator); fobj_klass_handle(pioFile); fobj_klass_handle(pioLocalDrive); fobj_klass_handle(pioRemoteDrive); -fobj_klass_handle(pioLocalFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); +fobj_klass_handle(pioLocalReadFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioRemoteFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioLocalWriteFile); fobj_klass_handle(pioRemoteWriteFile); From 06326900a5e4cd90b132e2965747d4790833bf0b Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 02:23:35 +0300 Subject: [PATCH 285/339] [PBCKP-426] buffer pioLocalReadFile --- src/utils/file.c | 95 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 68 insertions(+), 27 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 425b0ecf9..525a5d60b 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1946,8 +1946,11 @@ fobj_klass(pioFile); typedef struct pioLocalReadFile { - pioFile p; - int fd; + ft_str_t path; + int fd; + uint64_t off; + ft_bytes_t buf; + ft_bytes_t remain; } pioLocalReadFile; #define kls__pioLocalReadFile iface__pioReader, iface(pioReader, pioReadStream) fobj_klass(pioLocalReadFile); @@ -2167,8 +2170,10 @@ pioLocalDrive_pioOpenRead(VSelf, path_t path, err_i *err) return (pioReader_i){NULL}; } - file = $alloc(pioLocalReadFile, .fd = fd, - .p = { .path = ft_cstrdup(path) } ); + file = $alloc(pioLocalReadFile, + .fd = fd, + .path = ft_strdupc(path), + .buf = ft_bytes_alloc(CHUNK_SIZE)); return $bind(pioReader, file); } @@ -2801,12 +2806,13 @@ static void 
pioLocalReadFile_fobjDispose(VSelf) { Self(pioLocalReadFile); - if (!self->p.closed) + if (self->fd >= 0) { close(self->fd); self->fd = -1; - self->p.closed = true; } + ft_str_free(&self->path); + ft_bytes_free(&self->buf); } static err_i @@ -2816,48 +2822,83 @@ pioLocalReadFile_pioClose(VSelf) err_i err = $noerr(); int r; - ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); + ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->path.ptr); r = close(self->fd); - if (r < 0 && $isNULL(err)) + if (r < 0) err = $syserr(errno, "Cannot close file {path:q}", - path(self->p.path)); + path(self->path.ptr)); self->fd = -1; - self->p.closed = true; return err; } static size_t pioLocalReadFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) { - Self(pioLocalReadFile); - ssize_t r; - fobj_reset_err(err); + Self(pioLocalReadFile); + size_t buflen = buf.len; + ft_bytes_t to_read; + ssize_t r; + fobj_reset_err(err); - ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); + ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->path.ptr); - r = read(self->fd, buf.ptr, buf.len); - if (r < 0) - { - *err = $syserr(errno, "Cannot read from {path:q}", - path(self->p.path)); - return 0; - } - return r; + ft_bytes_move(&buf, &self->remain); + + while (buf.len && $noerr(*err)) + { + ft_assert(self->remain.len == 0); + + to_read = buf.len >= self->buf.len/2 ? 
buf : self->buf; + + r = read(self->fd, to_read.ptr, to_read.len); + if (r < 0) + *err = $syserr(errno, "Cannot read from {path:q}", + path(self->path.ptr)); + else if (r == 0) + break; + + if (to_read.ptr == buf.ptr) + ft_bytes_consume(&buf, r); + else + { + self->remain = ft_bytes(self->buf.ptr, r); + ft_bytes_move(&buf, &self->remain); + } + } + + self->off += buflen - buf.len; + return buflen - buf.len; } static err_i pioLocalReadFile_pioSeek(VSelf, uint64_t offs) { Self(pioLocalReadFile); + uint64_t delta; + off_t pos; - ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->p.path); + ft_assert(self->fd >= 0, "Closed file abused \"%s\"", self->path.ptr); - off_t pos = lseek(self->fd, offs, SEEK_SET); + delta = offs - self->off; /* may wrap around on backward seek, but the offs >= self->off check below guards that */ + if (offs >= self->off && delta <= self->remain.len) + { + ft_bytes_consume(&self->remain, delta); + self->off = offs; + return $noerr(); + } + /* + * Drop buffer if we seek too far or if we seek back. + * Seek back is used to re-read data from disk, so no buffer allowed. 
+ */ + self->remain = ft_bytes(NULL, 0); + pos = lseek(self->fd, offs, SEEK_SET); if (pos == (off_t)-1) - return $syserr(errno, "Can not seek to {offs} in file {path:q}", offs(offs), path(self->p.path)); + return $syserr(errno, "Can not seek to {offs} in file {path:q}", offs(offs), path(self->path.ptr)); + ft_assert(pos == offs); + self->off = offs; return $noerr(); } @@ -2866,7 +2907,7 @@ pioLocalReadFile_fobjRepr(VSelf) { Self(pioLocalReadFile); return $fmt("pioLocalReadFile({path:q}, fd:{fd}", - (path, $S(self->p.path)), (fd, $I(self->fd))); + (path, $S(self->path.ptr)), (fd, $I(self->fd))); } static err_i @@ -5625,7 +5666,7 @@ fobj_klass_handle(pioRemotePagesIterator); fobj_klass_handle(pioFile); fobj_klass_handle(pioLocalDrive); fobj_klass_handle(pioRemoteDrive); -fobj_klass_handle(pioLocalReadFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); +fobj_klass_handle(pioLocalReadFile, mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioRemoteFile, inherits(pioFile), mth(fobjDispose, fobjRepr)); fobj_klass_handle(pioLocalWriteFile); fobj_klass_handle(pioRemoteWriteFile); From d0b5e19bb9a9dda2c8e618e2ca3f4a611cf4fdc1 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 02:51:50 +0300 Subject: [PATCH 286/339] [PBCKP-426] add special method pioFileStat for pioLocalReadFile It will be useful in pioLocalPagesIterator. 
--- src/utils/file.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/src/utils/file.c b/src/utils/file.c index 525a5d60b..666116ca4 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1944,6 +1944,10 @@ typedef struct pioFile #define kls__pioFile mth(fobjDispose) fobj_klass(pioFile); +/* define it because pioLocalPagesIterator wants stat from local file */ +#define mth__pioFileStat pio_stat_t, (err_i*, err) +fobj_method(pioFileStat); + typedef struct pioLocalReadFile { ft_str_t path; @@ -1952,7 +1956,8 @@ typedef struct pioLocalReadFile ft_bytes_t buf; ft_bytes_t remain; } pioLocalReadFile; -#define kls__pioLocalReadFile iface__pioReader, iface(pioReader, pioReadStream) +#define kls__pioLocalReadFile iface__pioReader, iface(pioReader, pioReadStream), \ + mth(pioFileStat) fobj_klass(pioLocalReadFile); typedef struct pioLocalWriteFile @@ -2902,6 +2907,28 @@ pioLocalReadFile_pioSeek(VSelf, uint64_t offs) return $noerr(); } +static pio_stat_t +pioLocalReadFile_pioFileStat(VSelf, err_i *err) +{ + Self(pioLocalReadFile); + struct stat st = {0}; + pio_stat_t pst = {0}; + int r; + fobj_reset_err(err); + + r = fstat(self->fd, &st); + if (r < 0) + *err = $syserr(errno, "Cannot stat file {path:q}", path(self->path.ptr)); + else + { + pst.pst_kind = pio_statmode2file_kind(st.st_mode, self->path.ptr); + pst.pst_mode = pio_limit_mode(st.st_mode); + pst.pst_size = st.st_size; + pst.pst_mtime = st.st_mtime; + } + return pst; +} + static fobjStr* pioLocalReadFile_fobjRepr(VSelf) { From be6ceac93aa6aa819f8f8dc6be07495d874a3093 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 02:57:36 +0300 Subject: [PATCH 287/339] [PBCKP-426] use pioLocalReadFile in pioLocalPagesIterator I believe it better handles buffering with seeks than FILE*. 
--- src/utils/file.c | 100 +++++++++++++++++++------------------------ tests/backup_test.py | 2 +- 2 files changed, 44 insertions(+), 58 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 666116ca4..94525a956 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -5207,9 +5207,8 @@ typedef struct pioLocalPagesIterator bool just_validate; int segno; datapagemap_t map; - FILE *in; - void *buf; - const char *from_fullpath; + pioReader_i in; + char* from_fullpath; /* prev_backup_start_lsn */ XLogRecPtr start_lsn; @@ -5351,54 +5350,40 @@ pioLocalDrive_pioIteratePages(VSelf, path_t path, int segno, datapagemap_t pagemap, XLogRecPtr start_lsn, CompressAlg calg, int clevel, - uint32 checksum_version, bool just_validate, err_i *err) + uint32 checksum_version, bool just_validate, + err_i *err) { Self(pioLocalDrive); fobj_t iter = {0}; BlockNumber n_blocks; - FILE *in; - void *buf; - size_t bufsz; - int fd; - struct stat st; + pioReader_i in; + pio_stat_t st; fobj_reset_err(err); - in = fopen(path, PG_BINARY_R); - if (!in) - { - pioPagesIterator_i ret = {0}; - *err = $syserr(errno, "Cannot iterate pages"); - return ret; - } - - fd = fileno(in); - if (fstat(fd, &st) == -1) - { - fclose(in); - *err = $syserr(errno, "Cannot stat datafile"); + in = $(pioOpenRead, self, path, .err = err); + if ($haserr(*err)) return $null(pioPagesIterator); - } - bufsz = pagemap.bitmapsize > 0 ? SMALL_CHUNK_SIZE : MEDIUM_CHUNK_SIZE; - buf = ft_malloc(bufsz); - setvbuf(in, buf, _IOFBF, bufsz); + /* we know it is pioLocalReadFile which implements pioFileStat */ + st = $(pioFileStat, in.self, .err = err); + if ($haserr(*err)) + return $null(pioPagesIterator); /* * Compute expected number of blocks in the file. * NOTE This is a normal situation, if the file size has changed * since the moment we computed it. 
*/ - n_blocks = ft_div_i64u32_to_i32(st.st_size, BLCKSZ); + n_blocks = ft_div_i64u32_to_i32(st.pst_size, BLCKSZ); iter = $alloc(pioLocalPagesIterator, .segno = segno, .n_blocks = n_blocks, .just_validate = just_validate, - .from_fullpath = path, + .from_fullpath = ft_cstrdup(path), .map = pagemap, - .in = in, - .buf = buf, + .in = $iref(in), .start_lsn = start_lsn, .calg = calg, .clevel = clevel, @@ -5412,14 +5397,15 @@ pioLocalPagesIterator_fobjDispose(VSelf) { Self(pioLocalPagesIterator); - if (self->buf) ft_free(self->buf); - if (self->in) fclose(self->in); + $idel(&self->in); + ft_free(self->from_fullpath); } static int32 prepare_page(pioLocalPagesIterator *iter, BlockNumber blknum, Page page, - PageState *page_st); + PageState *page_st, + err_i *err); static err_i pioLocalPagesIterator_pioNextPage(VSelf, PageIteratorValue *value) @@ -5430,6 +5416,7 @@ pioLocalPagesIterator_pioNextPage(VSelf, PageIteratorValue *value) BlockNumber blknum; BlockNumber n_blocks; int rc = PageIsOk; + err_i err; blknum = self->blknum; value->compressed_size = 0; @@ -5447,7 +5434,10 @@ pioLocalPagesIterator_pioNextPage(VSelf, PageIteratorValue *value) value->blknum = blknum; self->blknum = blknum+1; - rc = prepare_page(self, blknum, page_buf, &value->state); + rc = prepare_page(self, blknum, page_buf, &value->state, &err); + if ($haserr(err)) + return $iresult(err); + value->page_result = rc; if (rc == PageIsTruncated) goto re_stat; @@ -5466,12 +5456,12 @@ pioLocalPagesIterator_pioNextPage(VSelf, PageIteratorValue *value) * prepare_page found file is shorter than expected. * Lets re-investigate its length. 
*/ - struct stat st; - int fd = fileno(self->in); - if (fstat(fd, &st) < 0) - return $syserr(errno, "Re-stat-ting file {path}", - path(self->from_fullpath)); - n_blocks = ft_div_i64u32_to_i32(st.st_size, BLCKSZ); + pio_stat_t st; + /* abuse we know self->in is pioLocalReadFile */ + st = $(pioFileStat, self->in.self, .err = &err); + if ($haserr(err)) + return $iresult(err); + n_blocks = ft_div_i64u32_to_i32(st.pst_size, BLCKSZ); /* we should not "forget" already produced pages */ if (n_blocks < self->lastblkn) n_blocks = self->lastblkn; @@ -5508,13 +5498,16 @@ pioLocalPagesIterator_pioFinalPageN(VSelf) * return it to the caller */ static int32 -prepare_page(pioLocalPagesIterator *iter, BlockNumber blknum, Page page, PageState *page_st) +prepare_page(pioLocalPagesIterator *iter, BlockNumber blknum, Page page, + PageState *page_st, err_i *err) { int try_again = PAGE_READ_ATTEMPTS; bool page_is_valid = false; const char *from_fullpath = iter->from_fullpath; BlockNumber absolute_blknum = iter->segno * RELSEG_SIZE + blknum; - int rc = 0; + int rc = 0; + size_t read_len; + fobj_reset_err(err); /* check for interrupt */ if (interrupted || thread_interrupted) @@ -5527,15 +5520,13 @@ prepare_page(pioLocalPagesIterator *iter, BlockNumber blknum, Page page, PageSta */ while (!page_is_valid && try_again--) { - int read_len = fseeko(iter->in, (off_t)blknum * BLCKSZ, SEEK_SET); - if (read_len == 0) /* seek is successful */ - { - /* read the block */ - read_len = fread(page, 1, BLCKSZ, iter->in); - if (read_len == 0 && ferror(iter->in)) - read_len = -1; - } + *err = $i(pioSeek, iter->in, (uint64_t)blknum * BLCKSZ); + if ($haserr(*err)) + return PageIsCorrupted; + read_len = $i(pioRead, iter->in, ft_bytes(page, BLCKSZ), .err = err); + if ($haserr(*err)) + return PageIsCorrupted; /* The block could have been truncated. It is fine. 
*/ if (read_len == 0) { @@ -5543,13 +5534,10 @@ prepare_page(pioLocalPagesIterator *iter, BlockNumber blknum, Page page, PageSta "block truncated", blknum, from_fullpath); return PageIsTruncated; } - else if (read_len < 0) - elog(ERROR, "Cannot read block %u of \"%s\": %s", - blknum, from_fullpath, strerror(errno)); else if (read_len != BLCKSZ) elog(WARNING, "Cannot read block %u of \"%s\": " - "read %i of %d, try again", - blknum, from_fullpath, read_len, BLCKSZ); + "read %lld of %d, try again", + blknum, from_fullpath, (long long)read_len, BLCKSZ); else { /* We have BLCKSZ of raw data, validate it */ @@ -5583,8 +5571,6 @@ prepare_page(pioLocalPagesIterator *iter, BlockNumber blknum, Page page, PageSta Assert(false); } } - /* avoid re-reading once buffered data, flushing on further attempts, see PBCKP-150 */ - fflush(iter->in); } /* diff --git a/tests/backup_test.py b/tests/backup_test.py index 16ac0cab5..bbad8a591 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -1710,7 +1710,7 @@ def test_basic_missing_file_permissions(self): os.chmod(full_path, 000) with self.assertRaisesRegex(ProbackupException, - r"ERROR: [^\n]*Cannot iterate pages: Permission denied"): + r"ERROR: [^\n]*: Permission denied"): # FULL backup self.backup_node( backup_dir, 'node', node, options=['--stream']) From c67538050353bf536bfbe55e41f9e68d8db1ce0a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 03:22:43 +0300 Subject: [PATCH 288/339] backup: descending sort by file size it is better suited for load balancing among threads --- src/backup.c | 2 +- src/checkdb.c | 2 +- src/dir.c | 13 +++---------- src/pg_probackup.h | 1 - 4 files changed, 5 insertions(+), 13 deletions(-) diff --git a/src/backup.c b/src/backup.c index 7901484aa..40c617704 100644 --- a/src/backup.c +++ b/src/backup.c @@ -421,7 +421,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, pfilearray_clear_locks(backup_files_list); /* Sort by size for load balancing */ - 
parray_qsort(backup_files_list, pgFileCompareSize); + parray_qsort(backup_files_list, pgFileCompareSizeDesc); /* Sort the array for binary search */ if (prev_backup_filelist) parray_qsort(prev_backup_filelist, pgFileCompareRelPathWithExternal); diff --git a/src/checkdb.c b/src/checkdb.c index 6880b0457..b1d6bd41d 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -227,7 +227,7 @@ do_block_validation(char *pgdata, uint32 checksum_version) } /* Sort by size for load balancing */ - parray_qsort(files_list, pgFileCompareSize); + parray_qsort(files_list, pgFileCompareSizeDesc); /* init thread args with own file lists */ threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); diff --git a/src/dir.c b/src/dir.c index 69e423563..aaec1dea1 100644 --- a/src/dir.c +++ b/src/dir.c @@ -313,26 +313,19 @@ pgFileCompareLinked(const void *f1, const void *f2) /* Compare two pgFile with their size */ int -pgFileCompareSize(const void *f1, const void *f2) +pgFileCompareSizeDesc(const void *f1, const void *f2) { pgFile *f1p = *(pgFile **)f1; pgFile *f2p = *(pgFile **)f2; - if (f1p->size > f2p->size) + if (f1p->size < f2p->size) return 1; - else if (f1p->size < f2p->size) + else if (f1p->size > f2p->size) return -1; else return 0; } -/* Compare two pgFile with their size in descending order */ -int -pgFileCompareSizeDesc(const void *f1, const void *f2) -{ - return -1 * pgFileCompareSize(f1, f2); -} - int pgCompareString(const void *str1, const void *str2) { diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 3865867b5..80a1b863d 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1018,7 +1018,6 @@ extern int pgFileCompareRelPathWithString(const void *f1, const void *f2); extern int pgFileCompareRelPathWithExternal(const void *f1, const void *f2); extern int pgFileCompareRelPathWithExternalDesc(const void *f1, const void *f2); extern int pgFileCompareLinked(const void *f1, const void *f2); -extern int pgFileCompareSize(const void *f1, const void *f2); extern int 
pgFileCompareSizeDesc(const void *f1, const void *f2); extern int pgCompareString(const void *str1, const void *str2); extern int pgPrefixCompareString(const void *str1, const void *str2); From 7eed38493433405ec2d9de6f979c0b831d637f76 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 06:35:44 +0300 Subject: [PATCH 289/339] [PBCKP-427] introduce pgFile hashtable We want to sort files be offset in page header map file. But it breaks searching with binary search by filename. Therefore make and use hashtable instead of binary search. --- src/backup.c | 27 +++--- src/catalog.c | 58 +++++++++++++ src/catchup.c | 205 ++++++++++++++++++++++++++------------------- src/data.c | 9 +- src/merge.c | 15 ++-- src/pg_probackup.h | 10 ++- src/restore.c | 4 +- 7 files changed, 211 insertions(+), 117 deletions(-) diff --git a/src/backup.c b/src/backup.c index 40c617704..8cb0eaed4 100644 --- a/src/backup.c +++ b/src/backup.c @@ -27,6 +27,7 @@ /* list of files contained in backup */ parray *backup_files_list = NULL; +parray *backup_files_hash = NULL; /* We need critical section for datapagemap_add() in case of using threads */ static pthread_mutex_t backup_pagemap_mutex = PTHREAD_MUTEX_INITIALIZER; @@ -95,6 +96,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, pgBackup *prev_backup = NULL; parray *prev_backup_filelist = NULL; + parray *prev_backup_hashtable = NULL; parray *backup_list = NULL; parray *external_dirs = NULL; parray *database_map = NULL; @@ -329,6 +331,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, /* Extract information about files in backup_list parsing their names:*/ parse_filelist_filenames(backup_files_list, instance_config.pgdata); + backup_files_hash = make_filelist_hashtable(backup_files_list); + elog(INFO, "Current Start LSN: %X/%X, TLI: %X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), current.tli); @@ -424,7 +428,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, 
parray_qsort(backup_files_list, pgFileCompareSizeDesc); /* Sort the array for binary search */ if (prev_backup_filelist) - parray_qsort(prev_backup_filelist, pgFileCompareRelPathWithExternal); + prev_backup_hashtable = make_filelist_hashtable(prev_backup_filelist); /* write initial backup_content.control file and update backup.control */ write_backup_filelist(¤t, backup_files_list, @@ -449,7 +453,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, arg->external_prefix = external_prefix; arg->external_dirs = external_dirs; arg->files_list = backup_files_list; - arg->prev_filelist = prev_backup_filelist; + arg->prev_filehash = prev_backup_hashtable; arg->prev_start_lsn = prev_backup_start_lsn; arg->hdr_map = &(current.hdr_map); arg->thread_num = i+1; @@ -492,6 +496,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, { parray_walk(prev_backup_filelist, pgFileFree); parray_free(prev_backup_filelist); + parray_free(prev_backup_hashtable); } /* Notify end of backup */ @@ -609,7 +614,9 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, parray_walk(backup_files_list, pgFileFree); parray_free(backup_files_list); + parray_free(backup_files_hash); backup_files_list = NULL; + backup_files_hash = NULL; } /* @@ -2015,14 +2022,13 @@ backup_files(void *arg) /* Check that file exist in previous backup */ if (current.backup_mode != BACKUP_MODE_FULL) { - pgFile **prev_file_tmp = NULL; - prev_file_tmp = (pgFile **) parray_bsearch(arguments->prev_filelist, - file, pgFileCompareRelPathWithExternal); + pgFile *prev_file_tmp = NULL; + prev_file_tmp = search_file_in_hashtable(arguments->prev_filehash, file); if (prev_file_tmp) { /* File exists in previous backup */ file->exists_in_prev = true; - prev_file = *prev_file_tmp; + prev_file = prev_file_tmp; } } @@ -2209,8 +2215,8 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno) char *rel_path; BlockNumber blkno_inseg; int segno; - pgFile **file_item; - pgFile f; + 
pgFile *file_item; + pgFile f = {0}; segno = blkno / RELSEG_SIZE; blkno_inseg = blkno % RELSEG_SIZE; @@ -2224,8 +2230,7 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno) f.external_dir_num = 0; /* backup_files_list should be sorted before */ - file_item = (pgFile **) parray_bsearch(backup_files_list, &f, - pgFileCompareRelPathWithExternal); + file_item = search_file_in_hashtable(backup_files_hash, &f); /* * If we don't have any record of this file in the file map, it means @@ -2239,7 +2244,7 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno) if (num_threads > 1) pthread_lock(&backup_pagemap_mutex); - datapagemap_add(&(*file_item)->pagemap, blkno_inseg); + datapagemap_add(&file_item->pagemap, blkno_inseg); if (num_threads > 1) pthread_mutex_unlock(&backup_pagemap_mutex); diff --git a/src/catalog.c b/src/catalog.c index deeadb55d..47de224b4 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -1174,6 +1174,64 @@ get_backup_filelist(pgBackup *backup, bool strict) return files; } +static uint32_t +pgFileHashRelPathWithExternal(pgFile* file) +{ + uint32_t hash = ft_small_cstr_hash(file->rel_path); + hash = ft_mix32(hash ^ file->external_dir_num); + return hash ? 
hash : 1; +} + +parray * +make_filelist_hashtable(parray* files) +{ + parray* buckets; + size_t nbuckets; + pgFile* file; + size_t i; + size_t pos; + + if (parray_num(files) == 0) + return NULL; + + buckets = parray_new(); + nbuckets = ft_nextpow2(parray_num(files)) / 2; + nbuckets = ft_max(ft_min(nbuckets, UINT32_MAX/2+1), 1); + parray_set(buckets, nbuckets-1, NULL); /* ensure size will be == nbuckets */ + + for (i = 0; i < parray_num(files); i++) + { + file = (pgFile*)parray_get(files, i); + file->hash = pgFileHashRelPathWithExternal(file); + pos = file->hash & (nbuckets - 1); + file->next = parray_get(buckets, pos); + parray_set(buckets, pos, file); + } + + return buckets; +} + +pgFile* +search_file_in_hashtable(parray* buckets, pgFile* file) +{ + pgFile* ent; + size_t pos; + + if (!file->hash) + file->hash = pgFileHashRelPathWithExternal(file); + + pos = file->hash & (parray_num(buckets)-1); + ent = (pgFile*) parray_get(buckets, pos); + while (ent != NULL) + { + if (ent->hash == file->hash && + pgFileCompareRelPathWithExternal(&file, &ent) == 0) + return ent; + ent = ent->next; + } + return NULL; +} + /* * Lock list of backups. Function goes in backward direction. 
*/ diff --git a/src/catchup.c b/src/catchup.c index b72462935..848a290e9 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -353,7 +353,7 @@ typedef struct const char *from_root; const char *to_root; parray *source_filelist; - parray *dest_filelist; + parray *dest_filehash; XLogRecPtr sync_lsn; BackupMode backup_mode; int thread_num; @@ -410,14 +410,13 @@ catchup_thread_runner(void *arg) /* Check that file exist in dest pgdata */ if (arguments->backup_mode != BACKUP_MODE_FULL) { - pgFile **dest_file_tmp = NULL; - dest_file_tmp = (pgFile **) parray_bsearch(arguments->dest_filelist, - file, pgFileCompareRelPathWithExternal); + pgFile *dest_file_tmp = NULL; + dest_file_tmp = search_file_in_hashtable(arguments->dest_filehash, file); if (dest_file_tmp) { /* File exists in destination PGDATA */ file->exists_in_prev = true; - dest_file = *dest_file_tmp; + dest_file = dest_file_tmp; } } @@ -473,7 +472,7 @@ catchup_multithreaded_copy(int num_threads, const char *source_pgdata_path, const char *dest_pgdata_path, parray *source_filelist, - parray *dest_filelist, + parray *dest_filehash, XLogRecPtr sync_lsn, BackupMode backup_mode) { @@ -493,7 +492,7 @@ catchup_multithreaded_copy(int num_threads, .from_root = source_pgdata_path, .to_root = dest_pgdata_path, .source_filelist = source_filelist, - .dest_filelist = dest_filelist, + .dest_filehash = dest_filehash, .sync_lsn = sync_lsn, .backup_mode = backup_mode, .thread_num = i + 1, @@ -531,7 +530,7 @@ catchup_multithreaded_copy(int num_threads, * Sync every file in destination directory to disk */ static void -catchup_sync_destination_files(const char* pgdata_path, fio_location location, parray *filelist, pgFile *pg_control_file) +catchup_sync_destination_files(const char* pgdata_path, fio_location location) { time_t start_time, end_time; char pretty_time[20]; @@ -598,30 +597,34 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, { pioDrive_i local_location = pioDriveForLocation(FIO_LOCAL_HOST); 
pioDrive_i db_location = pioDriveForLocation(FIO_DB_HOST); - PGconn *source_conn = NULL; - PGNodeInfo source_node_info; - parray *source_filelist = NULL; - pgFile *source_pg_control_file = NULL; - parray *dest_filelist = NULL; - char dest_xlog_path[MAXPGPATH]; + PGconn *source_conn = NULL; + PGNodeInfo source_node_info; + parray *source_filelist = NULL; + parray *source_filehash = NULL; + pgFile *source_pg_control_file = NULL; + parray *dest_filelist = NULL; + parray *dest_filehash = NULL; + char dest_xlog_path[MAXPGPATH]; + + RedoParams dest_redo = {0, InvalidXLogRecPtr, 0}; + PGStopBackupResult stop_backup_result; + bool catchup_isok = true; - RedoParams dest_redo = { 0, InvalidXLogRecPtr, 0 }; - PGStopBackupResult stop_backup_result; - bool catchup_isok = true; - - int i; + int i; /* for fancy reporting */ - time_t start_time, end_time; - int64_t transfered_datafiles_bytes = 0; - int64_t transfered_walfiles_bytes = 0; - char pretty_source_bytes[20]; - err_i err = $noerr(); + time_t start_time, end_time; + int64_t transfered_datafiles_bytes = 0; + int64_t transfered_walfiles_bytes = 0; + char pretty_source_bytes[20]; + err_i err = $noerr(); - source_conn = catchup_init_state(db_location, &source_node_info, source_pgdata, dest_pgdata); + source_conn = catchup_init_state(db_location, &source_node_info, + source_pgdata, dest_pgdata); catchup_preflight_checks(db_location, local_location, - &source_node_info, source_conn, source_pgdata, dest_pgdata); + &source_node_info, source_conn, source_pgdata, + dest_pgdata); /* we need to sort --exclude_path's for future searching */ if (exclude_absolute_paths_list != NULL) @@ -635,11 +638,13 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, { dest_filelist = parray_new(); db_list_dir(dest_filelist, dest_pgdata, true, false, 0, FIO_LOCAL_HOST); - filter_filelist(dest_filelist, dest_pgdata, exclude_absolute_paths_list, exclude_relative_paths_list, "Destination"); + filter_filelist(dest_filelist, 
dest_pgdata, exclude_absolute_paths_list, + exclude_relative_paths_list, "Destination"); // fill dest_redo.lsn and dest_redo.tli get_redo(local_location, dest_pgdata, &dest_redo); - elog(INFO, "syncLSN = %X/%X", (uint32) (dest_redo.lsn >> 32), (uint32) dest_redo.lsn); + elog(INFO, "syncLSN = %X/%X", (uint32) (dest_redo.lsn >> 32), + (uint32) dest_redo.lsn); /* * Future improvement to catch partial catchup: @@ -656,35 +661,39 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, */ if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK) { - XLogRecPtr ptrack_lsn = get_last_ptrack_lsn(source_conn, &source_node_info); + XLogRecPtr ptrack_lsn = get_last_ptrack_lsn(source_conn, + &source_node_info); if (ptrack_lsn > dest_redo.lsn || ptrack_lsn == InvalidXLogRecPtr) - elog(ERROR, "LSN from ptrack_control in source %X/%X is greater than checkpoint LSN in destination %X/%X.\n" - "You can perform only FULL catchup.", - (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn), - (uint32) (dest_redo.lsn >> 32), - (uint32) (dest_redo.lsn)); + elog(ERROR, + "LSN from ptrack_control in source %X/%X is greater than checkpoint LSN in destination %X/%X.\n" + "You can perform only FULL catchup.", + (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn), + (uint32) (dest_redo.lsn >> 32), + (uint32) (dest_redo.lsn)); } { - char label[1024]; + char label[1024]; /* notify start of backup to PostgreSQL server */ time2iso(label, lengthof(label), current.start_time, false); strncat(label, " with pg_probackup", lengthof(label) - - strlen(" with pg_probackup")); + strlen(" with pg_probackup")); /* Call pg_start_backup function in PostgreSQL connect */ - pg_start_backup(label, smooth_checkpoint, ¤t, &source_node_info, source_conn); - elog(INFO, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn)); + pg_start_backup(label, smooth_checkpoint, ¤t, &source_node_info, + source_conn); + elog(INFO, "pg_start_backup START LSN %X/%X", + 
(uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn)); } /* Sanity: source cluster must be "in future" relatively to dest cluster */ if (current.backup_mode != BACKUP_MODE_FULL && dest_redo.lsn > current.start_lsn) - elog(ERROR, "Current START LSN %X/%X is lower than SYNC LSN %X/%X, " - "it may indicate that we are trying to catchup with PostgreSQL instance from the past", - (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), - (uint32) (dest_redo.lsn >> 32), (uint32) (dest_redo.lsn)); + elog(ERROR, "Current START LSN %X/%X is lower than SYNC LSN %X/%X, " + "it may indicate that we are trying to catchup with PostgreSQL instance from the past", + (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), + (uint32) (dest_redo.lsn >> 32), (uint32) (dest_redo.lsn)); /* Start stream replication */ join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR); @@ -692,11 +701,12 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, { err = $i(pioMakeDir, local_location, .path = dest_xlog_path, .mode = DIR_PERMISSION, .strict = false); - if($haserr(err)) + if ($haserr(err)) { elog(ERROR, "Can not create WAL directory: %s", $errmsg(err)); } - start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, + start_WAL_streaming(source_conn, dest_xlog_path, + &instance_config.conn_opt, current.start_lsn, current.tli, false); } else @@ -730,29 +740,36 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, * extractPageMap(), make_pagemap_from_ptrack(). */ parray_qsort(source_filelist, pgFileCompareRelPathWithExternal); + source_filehash = make_filelist_hashtable(source_filelist); + //REVIEW Do we want to do similar calculation for dest? //REVIEW_ANSWER what for? 
{ - ssize_t source_bytes = 0; - char pretty_bytes[20]; + ssize_t source_bytes = 0; + char pretty_bytes[20]; source_bytes += calculate_datasize_of_filelist(source_filelist); /* Extract information about files in source_filelist parsing their names:*/ parse_filelist_filenames(source_filelist, source_pgdata); - filter_filelist(source_filelist, source_pgdata, exclude_absolute_paths_list, exclude_relative_paths_list, "Source"); + filter_filelist(source_filelist, source_pgdata, + exclude_absolute_paths_list, + exclude_relative_paths_list, "Source"); current.pgdata_bytes += calculate_datasize_of_filelist(source_filelist); - pretty_size(current.pgdata_bytes, pretty_source_bytes, lengthof(pretty_source_bytes)); - pretty_size(source_bytes - current.pgdata_bytes, pretty_bytes, lengthof(pretty_bytes)); - elog(INFO, "Source PGDATA size: %s (excluded %s)", pretty_source_bytes, pretty_bytes); + pretty_size(current.pgdata_bytes, pretty_source_bytes, + lengthof(pretty_source_bytes)); + pretty_size(source_bytes - current.pgdata_bytes, pretty_bytes, + lengthof(pretty_bytes)); + elog(INFO, "Source PGDATA size: %s (excluded %s)", pretty_source_bytes, + pretty_bytes); } elog(INFO, "Start LSN (source): %X/%X, TLI: %X", - (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), - current.tli); + (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), + current.tli); if (current.backup_mode != BACKUP_MODE_FULL) elog(INFO, "LSN in destination: %X/%X, TLI: %X", (uint32) (dest_redo.lsn >> 32), (uint32) (dest_redo.lsn), @@ -766,9 +783,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Build the page map from ptrack information */ make_pagemap_from_ptrack_2(source_filelist, source_conn, - source_node_info.ptrack_schema, - source_node_info.ptrack_version_num, - dest_redo.lsn); + source_node_info.ptrack_schema, + source_node_info.ptrack_version_num, + dest_redo.lsn); time(&end_time); elog(INFO, "Pagemap successfully extracted, time elapsed: 
%.0f sec", difftime(end_time, start_time)); @@ -786,7 +803,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, */ for (i = 0; i < parray_num(source_filelist); i++) { - pgFile *file = (pgFile *) parray_get(source_filelist, i); + pgFile *file = (pgFile *) parray_get(source_filelist, i); char parent_dir[MAXPGPATH]; if (file->kind != PIO_KIND_DIRECTORY || file->excluded) @@ -804,7 +821,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (strcmp(parent_dir, PG_TBLSPC_DIR) != 0) { /* if the entry is a regular directory, create it in the destination */ - char dirpath[MAXPGPATH]; + char dirpath[MAXPGPATH]; join_path_components(dirpath, dest_pgdata, file->rel_path); @@ -823,30 +840,34 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, { /* this directory located in pg_tblspc */ const char *linked_path = NULL; - char to_path[MAXPGPATH]; + char to_path[MAXPGPATH]; // TODO perform additional check that this is actually symlink? 
{ /* get full symlink path and map this path to new location */ - char source_full_path[MAXPGPATH]; - char symlink_content[MAXPGPATH]; - join_path_components(source_full_path, source_pgdata, file->rel_path); - fio_readlink(FIO_DB_HOST, source_full_path, symlink_content, sizeof(symlink_content)); + char source_full_path[MAXPGPATH]; + char symlink_content[MAXPGPATH]; + join_path_components(source_full_path, source_pgdata, + file->rel_path); + fio_readlink(FIO_DB_HOST, source_full_path, symlink_content, + sizeof(symlink_content)); /* we checked that mapping exists in preflight_checks for local catchup */ linked_path = get_tablespace_mapping(symlink_content); - elog(INFO, "Map tablespace full_path: \"%s\" old_symlink_content: \"%s\" new_symlink_content: \"%s\"\n", - source_full_path, - symlink_content, - linked_path); + elog(INFO, + "Map tablespace full_path: \"%s\" old_symlink_content: \"%s\" new_symlink_content: \"%s\"\n", + source_full_path, + symlink_content, + linked_path); } if (!is_absolute_path(linked_path)) - elog(ERROR, "Tablespace directory path must be an absolute path: %s\n", - linked_path); + elog(ERROR, + "Tablespace directory path must be an absolute path: %s\n", + linked_path); join_path_components(to_path, dest_pgdata, file->rel_path); elog(INFO, "Create directory \"%s\" and symbolic link \"%s\"", - linked_path, to_path); + linked_path, to_path); if (!dry_run) { @@ -855,13 +876,15 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, .mode = file->mode, .strict = false); if ($haserr(err)) { - elog(ERROR, "Could not create tablespace directory \"%s\": \"%s\"", + elog(ERROR, + "Could not create tablespace directory \"%s\": \"%s\"", linked_path, $errmsg(err)); } /* create link to linked_path */ if (fio_symlink(FIO_LOCAL_HOST, linked_path, to_path, true) < 0) - elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s", + elog(ERROR, + "Could not create symbolic link \"%s\" -> \"%s\": %s", to_path, linked_path, 
strerror(errno)); } } @@ -878,10 +901,14 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* pgFileCompareRelPathWithExternal uses only .rel_path and .external_dir_num for comparision */ search_key.rel_path = XLOG_CONTROL_FILE; search_key.external_dir_num = 0; - control_file_elem_index = parray_bsearch_index(source_filelist, &search_key, pgFileCompareRelPathWithExternal); - if(control_file_elem_index < 0) - elog(ERROR, "\"%s\" not found in \"%s\"\n", XLOG_CONTROL_FILE, source_pgdata); - source_pg_control_file = parray_remove(source_filelist, control_file_elem_index); + control_file_elem_index = parray_bsearch_index(source_filelist, + &search_key, + pgFileCompareRelPathWithExternal); + if (control_file_elem_index < 0) + elog(ERROR, "\"%s\" not found in \"%s\"\n", XLOG_CONTROL_FILE, + source_pgdata); + source_pg_control_file = parray_remove(source_filelist, + control_file_elem_index); } /* TODO before public release: must be more careful with pg_control. @@ -907,18 +934,18 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, parray_qsort(dest_filelist, pgFileCompareRelPathWithExternalDesc); for (i = 0; i < parray_num(dest_filelist); i++) { - bool redundant = true; - pgFile *file = (pgFile *) parray_get(dest_filelist, i); - pgFile **src_file = NULL; + bool redundant = true; + pgFile *file = (pgFile *) parray_get(dest_filelist, i); + pgFile *src_file = NULL; //TODO optimize it and use some merge-like algorithm //instead of bsearch for each file. 
- src_file = (pgFile **) parray_bsearch(source_filelist, file, pgFileCompareRelPathWithExternal); + src_file = search_file_in_hashtable(source_filehash, file); - if (src_file!= NULL && !(*src_file)->excluded && file->excluded) - (*src_file)->excluded = true; + if (src_file != NULL && !src_file->excluded && file->excluded) + src_file->excluded = true; - if (src_file!= NULL || file->excluded) + if (src_file != NULL || file->excluded) redundant = false; /* pg_filenode.map are always copied, because it's crc cannot be trusted */ @@ -929,7 +956,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* if file does not exists in destination list, then we can safely unlink it */ if (redundant) { - char fullpath[MAXPGPATH]; + char fullpath[MAXPGPATH]; join_path_components(fullpath, dest_pgdata, file->rel_path); if (!dry_run) @@ -937,7 +964,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (fio_remove(FIO_LOCAL_HOST, fullpath, false) == 0) elog(LOG, "Deleted file \"%s\"", fullpath); else - elog(ERROR, "Cannot delete redundant file in destination \"%s\": %s", fullpath, strerror(errno)); + elog(ERROR, + "Cannot delete redundant file in destination \"%s\": %s", + fullpath, strerror(errno)); } else elog(LOG, "Deleted file \"%s\"", fullpath); @@ -958,14 +987,14 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Sort the array for binary search */ if (dest_filelist) - parray_qsort(dest_filelist, pgFileCompareRelPathWithExternal); + dest_filehash = make_filelist_hashtable(dest_filelist); /* run copy threads */ elog(INFO, "Start transferring data files"); time(&start_time); transfered_datafiles_bytes = catchup_multithreaded_copy(num_threads, &source_node_info, source_pgdata, dest_pgdata, - source_filelist, dest_filelist, + source_filelist, dest_filehash, dest_redo.lsn, current.backup_mode); catchup_isok = transfered_datafiles_bytes != -1; @@ -1099,7 +1128,7 @@ do_catchup(const 
char *source_pgdata, const char *dest_pgdata, int num_threads, /* Sync all copied files unless '--no-sync' flag is used */ if (sync_dest_files && !dry_run) - catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file); + catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST); else elog(WARNING, "Files are not synced to disk"); @@ -1109,8 +1138,10 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, parray_walk(dest_filelist, pgFileFree); } parray_free(dest_filelist); + parray_free(dest_filehash); parray_walk(source_filelist, pgFileFree); parray_free(source_filelist); + parray_free(source_filehash); pgFileFree(source_pg_control_file); return 0; diff --git a/src/data.c b/src/data.c index 715ab5dab..a425e5181 100644 --- a/src/data.c +++ b/src/data.c @@ -641,7 +641,6 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, char from_fullpath[MAXPGPATH]; pioReader_i in; - pgFile **res_file = NULL; pgFile *tmp_file = NULL; /* page headers */ @@ -655,8 +654,7 @@ restore_data_file(parray *parent_chain, pgFile *dest_file, pioDBWriter_i out, backup_seq--; /* lookup file in intermediate backup */ - res_file = parray_bsearch(backup->files, dest_file, pgFileCompareRelPathWithExternal); - tmp_file = (res_file) ? *res_file : NULL; + tmp_file = search_file_in_hashtable(backup->hashtable, dest_file); /* Destination file is not exists yet at this moment */ if (tmp_file == NULL) @@ -1057,11 +1055,8 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, pgFile *dest_ tmp_backup = dest_backup->parent_backup_link; while (tmp_backup) { - pgFile **res_file = NULL; - /* lookup file in intermediate backup */ - res_file = parray_bsearch(tmp_backup->files, dest_file, pgFileCompareRelPathWithExternal); - tmp_file = (res_file) ? 
*res_file : NULL; + tmp_file = search_file_in_hashtable(tmp_backup->hashtable, dest_file); /* * It should not be possible not to find destination file in intermediate diff --git a/src/merge.c b/src/merge.c index 9b727941f..108510b53 100644 --- a/src/merge.c +++ b/src/merge.c @@ -582,6 +582,7 @@ merge_chain(InstanceState *instanceState, backup->files = get_backup_filelist(backup, true); parray_qsort(backup->files, pgFileCompareRelPathWithExternal); + backup->hashtable = make_filelist_hashtable(backup->files); /* Set MERGING status for every member of the chain */ if (backup->backup_mode == BACKUP_MODE_FULL) @@ -908,6 +909,7 @@ merge_chain(InstanceState *instanceState, { parray_walk(backup->files, pgFileFree); parray_free(backup->files); + parray_free(backup->hashtable); } } } @@ -1005,14 +1007,12 @@ merge_files(void *arg) for (i = parray_num(arguments->parent_chain) - 1; i >= 0; i--) { - pgFile **res_file = NULL; pgFile *file = NULL; pgBackup *backup = (pgBackup *) parray_get(arguments->parent_chain, i); /* lookup file in intermediate backup */ - res_file = parray_bsearch(backup->files, dest_file, pgFileCompareRelPathWithExternal); - file = (res_file) ? *res_file : NULL; + file = search_file_in_hashtable(backup->hashtable, dest_file); /* Destination file is not exists yet, * in-place merge is impossible @@ -1043,11 +1043,8 @@ merge_files(void *arg) */ if (in_place) { - pgFile **res_file = NULL; pgFile *file = NULL; - res_file = parray_bsearch(arguments->full_backup->files, dest_file, - pgFileCompareRelPathWithExternal); - file = (res_file) ? 
*res_file : NULL; + file = search_file_in_hashtable(arguments->full_backup->hashtable, dest_file); /* If file didn`t changed in any way, then in-place merge is possible */ if (file && @@ -1283,12 +1280,10 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup, */ for (i = 0; i < parray_num(parent_chain); i++) { - pgFile **res_file = NULL; from_backup = (pgBackup *) parray_get(parent_chain, i); /* lookup file in intermediate backup */ - res_file = parray_bsearch(from_backup->files, dest_file, pgFileCompareRelPathWithExternal); - from_file = (res_file) ? *res_file : NULL; + from_file = search_file_in_hashtable(from_backup->hashtable, dest_file); /* * It should not be possible not to find source file in intermediate diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 80a1b863d..ea210a427 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -249,6 +249,10 @@ typedef struct pgFile pg_off_t hdr_off; /* offset in header map */ int hdr_size; /* length of headers */ bool excluded; /* excluded via --exclude-path option */ + + /* hash table entry fields */ + uint32_t hash; + struct pgFile* next; } pgFile; typedef struct page_map_entry @@ -471,6 +475,7 @@ struct pgBackup backup_path/instance_name/backup_id/database */ parray *files; /* list of files belonging to this backup * must be populated explicitly */ + parray *hashtable; /* hash table for faster file search */ char *note; pg_crc32 content_crc; @@ -552,7 +557,7 @@ typedef struct const char *external_prefix; parray *files_list; - parray *prev_filelist; + parray *prev_filehash; parray *external_dirs; XLogRecPtr prev_start_lsn; @@ -1025,6 +1030,9 @@ extern int pgCompareOid(const void *f1, const void *f2); extern void pfilearray_clear_locks(parray *file_list); extern bool set_forkname(pgFile *file); +extern parray* make_filelist_hashtable(parray* files); +extern pgFile* search_file_in_hashtable(parray* buckets, pgFile* file); + /* in data.c */ extern bool check_data_file(pgFile *file, const char 
*from_fullpath, uint32 checksum_version); diff --git a/src/restore.c b/src/restore.c index 9c951a10d..10a04885e 100644 --- a/src/restore.c +++ b/src/restore.c @@ -772,6 +772,7 @@ restore_chain(InstanceState *instanceState, * using bsearch. */ parray_qsort(backup->files, pgFileCompareRelPathWithExternal); + backup->hashtable = make_filelist_hashtable(backup->files); } /* If dest backup version is older than 2.4.0, then bitmap optimization @@ -920,7 +921,7 @@ restore_chain(InstanceState *instanceState, bool redundant = true; pgFile *file = (pgFile *) parray_get(pgdata_files, i); - if (parray_bsearch(dest_backup->files, file, pgFileCompareRelPathWithExternal)) + if (search_file_in_hashtable(dest_backup->hashtable, file)) redundant = false; /* pg_filenode.map are always restored, because it's crc cannot be trusted */ @@ -1092,6 +1093,7 @@ restore_chain(InstanceState *instanceState, parray_walk(backup->files, pgFileFree); parray_free(backup->files); + parray_free(backup->hashtable); } } From 6b8d3df36829c1e8c8dc1e305f4a15e2107a4c94 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 04:32:01 +0300 Subject: [PATCH 290/339] [PBCKP-427] cache header map files --- src/data.c | 86 +++++++++++++++++++++++++++++++++++++++++----- src/dir.c | 14 ++++++++ src/merge.c | 4 +++ src/pg_probackup.h | 2 ++ src/restore.c | 4 +++ src/utils/file.h | 2 ++ src/validate.c | 4 +++ 7 files changed, 107 insertions(+), 9 deletions(-) diff --git a/src/data.c b/src/data.c index a425e5181..5f00eb942 100644 --- a/src/data.c +++ b/src/data.c @@ -1894,6 +1894,81 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, pgFile *file, return $noerr(); } +typedef struct header_map_cache_item header_map_cache_item_t; +struct header_map_cache_item { + ft_str_t path; + pioReader_i fl; + err_i err; + + header_map_cache_item_t* next; +}; + +typedef struct HeaderMapCache { + header_map_cache_item_t *first; +} HeaderMapCache; +#define kls__HeaderMapCache mth(fobjDispose) 
+fobj_klass(HeaderMapCache); + +static __thread HeaderMapCache *header_map_cache = NULL; + +/* + * Header_map_cache_init initializes header_map open files cache. + * It allocates object that will reside in AutoReleasePool. + * Therefore it should be called in threads top level function. */ +void +header_map_cache_init(void) +{ + header_map_cache = $alloc(HeaderMapCache); +} + +static pioReadSeek_i +header_map_cache_open(pioDrive_i drive, path_t path, err_i* err) +{ + ft_str_t pth = ft_cstr(path); + header_map_cache_item_t **item; + ft_assert(header_map_cache != NULL); + + item = &header_map_cache->first; + while (*item) + { + if (ft_streq((*item)->path, pth)) + break; + item = &(*item)->next; + } + if ((*item) == NULL) + { + *item = ft_calloc(sizeof(header_map_cache_item_t)); + (*item)->path = ft_strdup(pth); + (*item)->fl = $iref($i(pioOpenRead, drive, .path = path, + .err = &(*item)->err)); + (*item)->err = $iref((*item)->err); + } + *err = (*item)->err; + return $reduce(pioReadSeek, (*item)->fl); +} + +static void +HeaderMapCache_fobjDispose(VSelf) +{ + Self(HeaderMapCache); + header_map_cache_item_t *it; + ft_assert(header_map_cache == self); + header_map_cache = NULL; + + while (self->first) + { + it = self->first; + self->first = it->next; + ft_str_free(&it->path); + $i(pioClose, it->fl); + $idel(&it->fl); + $idel(&it->err); + ft_free(it); + } +} + +fobj_klass_handle(HeaderMapCache); + /* * Attempt to open header file, read content and return as * array of headers. 
@@ -1915,7 +1990,7 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b int z_len = 0; char *zheaders = NULL; const char *errormsg = NULL; - pioReader_i reader = {0}; + pioReadSeek_i reader = {0}; size_t rc; err_i err = $noerr(); @@ -1926,7 +2001,7 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b return NULL; /* TODO: consider to make this descriptor thread-specific */ - reader = $i(pioOpenRead, drive, .path = hdr_map->path, &err); + reader = header_map_cache_open(drive, hdr_map->path, &err); if ($haserr(err)) { elog(strict ? ERROR : WARNING, "Cannot open header file \"%s\": %s", hdr_map->path, strerror(errno)); @@ -1995,13 +2070,6 @@ get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, b cleanup: pg_free(zheaders); - if ($notNULL(reader)) - { - err = $i(pioClose,reader); - if ($haserr(err)) - elog(ERROR, "Cannot close file \"%s\"", hdr_map->path); - } - if (!success) { pg_free(headers); diff --git a/src/dir.c b/src/dir.c index aaec1dea1..7f74f4c88 100644 --- a/src/dir.c +++ b/src/dir.c @@ -326,6 +326,20 @@ pgFileCompareSizeDesc(const void *f1, const void *f2) return 0; } +/* + * Compare files by offset in headers file. + * It really matters during restore. + */ +int +pgFileCompareByHdrOff(const void *f1, const void *f2) +{ + pgFile *f1p = *(pgFile **)f1; + pgFile *f2p = *(pgFile **)f2; + + return f1p->hdr_off < f2p->hdr_off ? 
-1 : + f1p->hdr_off > f2p->hdr_off; +} + int pgCompareString(const void *str1, const void *str2) { diff --git a/src/merge.c b/src/merge.c index 108510b53..70ee282a6 100644 --- a/src/merge.c +++ b/src/merge.c @@ -598,6 +598,8 @@ merge_chain(InstanceState *instanceState, else write_backup_status(backup, BACKUP_STATUS_MERGING, true); } + /* attempt to speedup headers reading at least for dest backup */ + parray_qsort(dest_backup->files, pgFileCompareByHdrOff); /* Construct path to database dir: /backup_dir/instance_name/FULL/database */ join_path_components(full_database_dir, full_backup->root_dir, DATABASE_DIR); @@ -925,6 +927,8 @@ merge_files(void *arg) merge_files_arg *arguments = (merge_files_arg *) arg; size_t n_files = parray_num(arguments->dest_backup->files); + header_map_cache_init(); + for (i = 0; i < n_files; i++) { FOBJ_LOOP_ARP(); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index ea210a427..0a60ee57c 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1024,6 +1024,7 @@ extern int pgFileCompareRelPathWithExternal(const void *f1, const void *f2); extern int pgFileCompareRelPathWithExternalDesc(const void *f1, const void *f2); extern int pgFileCompareLinked(const void *f1, const void *f2); extern int pgFileCompareSizeDesc(const void *f1, const void *f2); +extern int pgFileCompareByHdrOff(const void *f1, const void *f2); extern int pgCompareString(const void *str1, const void *str2); extern int pgPrefixCompareString(const void *str1, const void *str2); extern int pgCompareOid(const void *f1, const void *f2); @@ -1065,6 +1066,7 @@ extern datapagemap_t *get_lsn_map(const char *fullpath, uint32 checksum_version, extern bool validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, uint32 checksum_version, uint32 backup_version, HeaderMap *hdr_map); +extern void header_map_cache_init(void); extern BackupPageHeader2* get_data_file_headers(HeaderMap *hdr_map, pgFile *file, uint32 backup_version, bool strict); extern void 
write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, bool is_merge); extern void init_header_map(pgBackup *backup); diff --git a/src/restore.c b/src/restore.c index 10a04885e..11928020f 100644 --- a/src/restore.c +++ b/src/restore.c @@ -987,6 +987,8 @@ restore_chain(InstanceState *instanceState, time(&start_time); thread_interrupted = false; + parray_qsort(dest_files, pgFileCompareByHdrOff); + /* Restore files into target directory */ for (i = 0; i < num_threads; i++) { @@ -1113,6 +1115,8 @@ restore_files(void *arg) restore_files_arg *arguments = (restore_files_arg *) arg; + header_map_cache_init(); + n_files = parray_num(arguments->dest_files); db_drive = pioDBDriveForLocation(FIO_DB_HOST); diff --git a/src/utils/file.h b/src/utils/file.h index 1ef6d6e29..222ffedd2 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -212,6 +212,7 @@ fobj_method(pioTruncate); fobj_method(pioWriteFinish); fobj_method(pioSeek); +#define iface__pioReadSeek mth(pioRead, pioSeek) #define iface__pioReader mth(pioRead, pioClose, pioSeek) #define iface__pioReadStream mth(pioRead, pioClose) #define iface__pioWriteFlush mth(pioWrite, pioWriteFinish) @@ -219,6 +220,7 @@ fobj_method(pioSeek); #define iface__pioDBWriter mth(pioWrite, pioSeek, pioWriteCompressed), \ mth(pioWriteFinish, pioTruncate, pioClose) #define iface__pioReadCloser mth(pioRead, pioClose) +fobj_iface(pioReadSeek); fobj_iface(pioReader); fobj_iface(pioReadStream); fobj_iface(pioWriteFlush); diff --git a/src/validate.c b/src/validate.c index d7fde27c0..f6d5a2d70 100644 --- a/src/validate.c +++ b/src/validate.c @@ -126,6 +126,8 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) return; } + parray_qsort(files, pgFileCompareByHdrOff); + // if (params && params->partial_db_list) // dbOid_exclude_list = get_dbOid_exclude_list(backup, files, params->partial_db_list, // params->partial_restore_type); @@ -240,6 +242,8 @@ pgBackupValidateFiles(void *arg) pioDrive_i backup_drive = 
arguments->backup_drive; err_i err; + header_map_cache_init(); + for (i = 0; i < num_files; i++) { FOBJ_LOOP_ARP(); From cc82beecd159001aeda6a0086b132b0f624b6733 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 06:35:44 +0300 Subject: [PATCH 291/339] [PBCKP-428] optimize read_recovery_info Instead of stepping back by one record, step back by megabyte and then go forward. This is much better both for gzipped wal and for S3. --- src/parsexlog.c | 67 ++++++++++++++++++++++++++++--------------------- 1 file changed, 39 insertions(+), 28 deletions(-) diff --git a/src/parsexlog.c b/src/parsexlog.c index 5d887831b..78f0157e1 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -519,6 +519,7 @@ validate_wal(pgBackup *backup, const char *archivedir, } } +#define STEPBACK_CHUNK (1024*1024) /* * Read from archived WAL segments latest recovery time and xid. All necessary * segments present at archive folder. We waited **stop_lsn** in @@ -529,7 +530,8 @@ read_recovery_info(const char *archivedir, TimeLineID tli, uint32 wal_seg_size, XLogRecPtr start_lsn, XLogRecPtr stop_lsn, time_t *recovery_time) { - XLogRecPtr startpoint = stop_lsn; + XLogRecPtr startpoint = stop_lsn - (stop_lsn % STEPBACK_CHUNK); + XLogRecPtr endpoint = stop_lsn; XLogReaderState *xlogreader; XLogReaderData reader_data; bool res; @@ -548,44 +550,53 @@ read_recovery_info(const char *archivedir, TimeLineID tli, uint32 wal_seg_size, /* Read records from stop_lsn down to start_lsn */ do { + XLogRecPtr curpoint; XLogRecord *record; TimestampTz last_time = 0; char *errormsg; -#if PG_VERSION_NUM >= 130000 - if (XLogRecPtrIsInvalid(startpoint)) - startpoint = SizeOfXLogShortPHD; - XLogBeginRead(xlogreader, startpoint); -#endif + curpoint = startpoint; + if (curpoint < start_lsn) + curpoint = start_lsn; - record = WalReadRecord(xlogreader, startpoint, &errormsg); - if (record == NULL) - { - XLogRecPtr errptr; + curpoint = XLogFindNextRecord(xlogreader, curpoint); - errptr = startpoint ? 
startpoint : xlogreader->EndRecPtr; + do { + record = WalReadRecord(xlogreader, curpoint, &errormsg); + if (record == NULL) + { + XLogRecPtr errptr; - if (errormsg) - elog(ERROR, "Could not read WAL record at %X/%X: %s", - (uint32) (errptr >> 32), (uint32) (errptr), - errormsg); - else - elog(ERROR, "Could not read WAL record at %X/%X", - (uint32) (errptr >> 32), (uint32) (errptr)); - } + errptr = curpoint ? curpoint : xlogreader->EndRecPtr; - /* Read previous record */ - startpoint = record->xl_prev; + if (errormsg) + elog(ERROR, "Could not read WAL record at %X/%X: %s", + (uint32) (errptr >> 32), (uint32) (errptr), + errormsg); + else + elog(ERROR, "Could not read WAL record at %X/%X", + (uint32) (errptr >> 32), (uint32) (errptr)); - if (getRecordTimestamp(xlogreader, &last_time)) - { - *recovery_time = timestamptz_to_time_t(last_time); + /* for compatibility with Pg < 13 */ + curpoint = InvalidXLogRecPtr; + + if (getRecordTimestamp(xlogreader, &last_time)) + { + *recovery_time = timestamptz_to_time_t(last_time); + + /* Found timestamp in WAL record 'record' */ + res = true; + } + } + } while (xlogreader->EndRecPtr < endpoint+1); - /* Found timestamp in WAL record 'record' */ - res = true; + if (res) goto cleanup; - } - } while (startpoint >= start_lsn); + + /* Goto previous megabyte */ + endpoint = startpoint-1; + startpoint -= STEPBACK_CHUNK; + } while (startpoint + STEPBACK_CHUNK >= start_lsn); /* Didn't find timestamp from WAL records between start_lsn and stop_lsn */ res = false; From 80caef108b4101e44f8177bf29255c9e94ec32ce Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 07:47:22 +0300 Subject: [PATCH 292/339] fix auth_test.test_backup_via_unprivileged_user --- tests/auth_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/auth_test.py b/tests/auth_test.py index a9706a481..748cdee55 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -123,7 +123,7 @@ def test_backup_via_unprivileged_user(self): '\n 
Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - if self.get_vestion(node) < self.version_to_num('15.0'): + if self.get_version(node) < self.version_to_num('15.0'): node.safe_psql( "postgres", "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " From 46517a2fa035cdf882bb8cc98f4d518f0c4bb521 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 07:59:15 +0300 Subject: [PATCH 293/339] fix test_backup_concurrent_drop_table --- src/data.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/data.c b/src/data.c index 5f00eb942..55e32f2ed 100644 --- a/src/data.c +++ b/src/data.c @@ -1216,14 +1216,13 @@ send_file(pioDrive_i db_drive, pioDrive_i backup_drive, const char *to_fullpath, .permissions = file->mode, .sync = sync, .err = &err); if($haserr(err)) - elog(ERROR, "Cannot open destination file \"%s\": %s", - to_fullpath, $errmsg(err)); + return $iresult(err); /* open from_fullpath */ in = $i(pioOpenReadStream, db_drive, .path = from_fullpath, .err = &err); if($haserr(err)) - goto cleanup; + return $iresult(err); /* * Copy content and calc CRC as it gets copied. 
Optionally pioZeroTail @@ -1241,7 +1240,7 @@ send_file(pioDrive_i db_drive, pioDrive_i backup_drive, const char *to_fullpath, NULL); if($haserr(err)) - goto cleanup; + return $iresult(err); if (file) { file->crc = pioCRC32Counter_getCRC32(c); @@ -1253,11 +1252,9 @@ send_file(pioDrive_i db_drive, pioDrive_i backup_drive, const char *to_fullpath, file->uncompressed_size = file->read_size; } -cleanup: $i(pioClose, in); - $i(pioClose, out); + err = $i(pioClose, out); - // has $noerr() by default return $iresult(err); } From eecba5f4aa87203e2c9aa4b5024a606e4cb57fa9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 08:04:50 +0300 Subject: [PATCH 294/339] fix test_remove_instance_config --- tests/config_test.py | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/tests/config_test.py b/tests/config_test.py index b1a0f9295..595b12ef1 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -34,21 +34,9 @@ def test_remove_instance_config(self): os.unlink(os.path.join(backup_dir, 'backups','node', 'pg_probackup.conf')) - try: + with self.assertRaisesRegex(ProbackupException, r'ERROR: Reading instance control.*No such file'): self.backup_node( backup_dir, 'node', node, backup_type='page') - self.assertEqual( - 1, 0, - "Expecting Error because pg_probackup.conf is missing. " - ".\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: could not open file "{0}": ' - 'No such file or directory'.format(conf_file), - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) # @unittest.expectedFailure # @unittest.skip("skip") From a394ea3a5de56111c26949288cc94745429b2bb3 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 08:18:02 +0300 Subject: [PATCH 295/339] fix test_missing_replication_permission_1 : fix freing xlogFile xlogFile did contain pgFile in its start. 
Therefore pgFileFree were used to free it. Not xlogFile doesn't contain pgFile, so its free procedure should be fixed. --- src/catalog.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 47de224b4..1b1caab73 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -63,13 +63,20 @@ timelineInfoNew(TimeLineID tli) return tlinfo; } +static void +xlogFile_free(xlogFile* fl) +{ + ft_str_free(&fl->name); + ft_free(fl); +} + /* free timelineInfo object */ void timelineInfoFree(void *tliInfo) { timelineInfo *tli = (timelineInfo *) tliInfo; - parray_walk(tli->xlog_filelist, pgFileFree); + parray_walk(tli->xlog_filelist, xlogFile_free); parray_free(tli->xlog_filelist); if (tli->backups) @@ -1668,7 +1675,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) } /* append file to xlog file list */ - wal_file = palloc(sizeof(xlogFile)); + wal_file = ft_calloc(sizeof(xlogFile)); wal_file->name = ft_str_steal(&file.name); wal_file->size = file.stat.pst_size; wal_file->segno = segno; @@ -1690,7 +1697,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) } /* append file to xlog file list */ - wal_file = palloc(sizeof(xlogFile)); + wal_file = ft_calloc(sizeof(xlogFile)); wal_file->name = ft_str_steal(&file.name); wal_file->size = file.stat.pst_size; wal_file->segno = segno; @@ -1712,7 +1719,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) } /* append file to xlog file list */ - wal_file = palloc(sizeof(xlogFile)); + wal_file = ft_calloc(sizeof(xlogFile)); wal_file->name = ft_str_steal(&file.name); wal_file->size = file.stat.pst_size; wal_file->segno = segno; @@ -1775,7 +1782,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) tlinfo->size += file.stat.pst_size; /* append file to xlog file list */ - wal_file = palloc(sizeof(xlogFile)); + wal_file = ft_calloc(sizeof(xlogFile)); wal_file->name = 
ft_str_steal(&file.name); wal_file->size = file.stat.pst_size; wal_file->segno = segno; From d8c41ae1ebb2e9525c1ae7390ca3678307be3060 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 08:41:53 +0300 Subject: [PATCH 296/339] fix init tests --- src/init.c | 2 +- src/pg_probackup.c | 6 +++--- src/utils/file.c | 2 +- tests/init_test.py | 33 ++++++--------------------------- 4 files changed, 11 insertions(+), 32 deletions(-) diff --git a/src/init.c b/src/init.c index 633746458..849c8535b 100644 --- a/src/init.c +++ b/src/init.c @@ -111,7 +111,7 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) if ($haserr(err)) ft_logerr(FT_FATAL, $errmsg(err), "Check instance"); if (exists) - elog(ERROR, "Instance '%s' %s directory alredy exists: '%s'", + elog(ERROR, "Instance '%s' %s directory already exists: '%s'", instanceState->instance_name, paths[i][0], paths[i][1]); } } diff --git a/src/pg_probackup.c b/src/pg_probackup.c index bf49e90ac..f57d12799 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -532,11 +532,11 @@ pbk_main(int argc, char *argv[]) instanceState->instance_backup_subdir_path); // TODO: redundant message, should we get rid of it? 
- elog(ERROR, "Instance '%s' does not exist in this backup catalog", - instance_name); + elog(ERROR, "-B, --backup-path must be a path to directory"); } if (!exists) - elog(ERROR, "-B, --backup-path must be a path to directory"); + elog(ERROR, "Instance '%s' does not exist in this backup catalog", + instance_name); } } diff --git a/src/utils/file.c b/src/utils/file.c index 94525a956..acceed4c8 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2477,7 +2477,7 @@ pioLocalDrive_pioIsDirEmpty(VSelf, path_t path, err_i* err) return false; } - while ((dent = readdir(dir)) != NULL) + for (errno=0;(dent = readdir(dir)) != NULL;errno=0) { if (strcmp(dent->d_name, ".") == 0) continue; diff --git a/tests/init_test.py b/tests/init_test.py index bd1353dee..bbbcdf97c 100644 --- a/tests/init_test.py +++ b/tests/init_test.py @@ -115,35 +115,14 @@ def test_add_instance_idempotence(self): dir_backups = os.path.join(backup_dir, 'backups', 'node') dir_wal = os.path.join(backup_dir, 'wal', 'node') - try: + with open(os.path.join(dir_wal, "0000"), 'w'): + pass + + with self.assertRaisesRegex(ProbackupException, r"'node'.*WAL.*already exists"): self.add_instance(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Instance 'node' WAL archive directory already exists: ", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - try: + with self.assertRaisesRegex(ProbackupException, r"'node'.*WAL.*already exists"): self.add_instance(backup_dir, 'node', node) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because page backup should not be possible " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), 
self.cmd)) - except ProbackupException as e: - self.assertIn( - "ERROR: Instance 'node' WAL archive directory already exists: ", - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) def test_init_backup_catalog_no_access(self): """ Test pg_probackup init -B backup_dir to a dir with no read access. """ @@ -153,7 +132,7 @@ def test_init_backup_catalog_no_access(self): os.makedirs(no_access_dir) os.chmod(no_access_dir, stat.S_IREAD) - expected = 'ERROR: cannot open backup catalog directory "{0}": Permission denied'.format(backup_dir) + expected = 'ERROR: cannot open backup catalog directory.*{0}.*Permission denied'.format(backup_dir) with self.assertRaisesRegex(ProbackupException, expected): self.init_pb(backup_dir) From f9615aeb6b16f967cf6a6bdd0e0cf2d765bd8e6e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 08:47:56 +0300 Subject: [PATCH 297/339] fix test_merge_external_dir_is_missing --- src/dir.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/dir.c b/src/dir.c index 7f74f4c88..36d248d28 100644 --- a/src/dir.c +++ b/src/dir.c @@ -563,6 +563,10 @@ db_list_dir(parray *files, const char* root, /* XXX: why the hell it is "ok" for non-external directories? 
*/ return; } + if (getErrno(err) == ENOENT && external_dir_num != 0) + { + ft_logerr(FT_FATAL, $errmsg(err), "External directory is not found"); + } if (getErrno(err) == ENOTDIR && external_dir_num != 0) { elog(ERROR, " --external-dirs option \"%s\": directory or symbolic link expected", From c83d69c3598bebfcf2963ffbe0c9cf3c4377bf8f Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 09:00:51 +0300 Subject: [PATCH 298/339] fix test_continue_failed_merge_2 --- tests/merge_test.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/merge_test.py b/tests/merge_test.py index 6fda0a31b..eb57463fe 100644 --- a/tests/merge_test.py +++ b/tests/merge_test.py @@ -1137,6 +1137,9 @@ def test_continue_failed_merge_2(self): gdb.run_until_break() gdb._execute('thread apply all bt') + gdb.remove_all_breakpoints() + + gdb.set_breakpoint('pioRemoveDir__do') gdb.continue_execution_until_break(20) From fe82f4e45e0048f8543652b099f17afa61ffd3eb Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 09:20:42 +0300 Subject: [PATCH 299/339] test_recovery_target_lsn_backup_victim - looks like it should pass and not fail --- tests/false_positive_test.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/false_positive_test.py b/tests/false_positive_test.py index eafa1ee07..c8b6cd7ff 100644 --- a/tests/false_positive_test.py +++ b/tests/false_positive_test.py @@ -200,13 +200,16 @@ def test_recovery_target_time_backup_victim(self): backup_dir, 'node', options=['--recovery-target-time={0}'.format(target_time)]) - @unittest.expectedFailure + # @unittest.expectedFailure # @unittest.skip("skip") def test_recovery_target_lsn_backup_victim(self): """ Check that for validation to recovery target probackup chooses valid backup https://github.com/postgrespro/pg_probackup/issues/104 + + @y.sokolov: looks like this test should pass. 
+ So I commented 'expectedFailure' """ backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( From 4418ec957257966a86d5433f6fc5a7c3d0fc4896 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 10:58:02 +0300 Subject: [PATCH 300/339] [PBCKP-428] fix "optimize read_recovery_info" Well, we couldn't easily step back be 1MB, since it could be in uninitialized tail of previous segment. We need to use valid pointer in record->xl_prev. --- src/parsexlog.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/src/parsexlog.c b/src/parsexlog.c index 78f0157e1..83aba3d52 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -551,6 +551,7 @@ read_recovery_info(const char *archivedir, TimeLineID tli, uint32 wal_seg_size, do { XLogRecPtr curpoint; + XLogRecPtr prevpoint = 0; XLogRecord *record; TimestampTz last_time = 0; char *errormsg; @@ -563,6 +564,8 @@ read_recovery_info(const char *archivedir, TimeLineID tli, uint32 wal_seg_size, do { record = WalReadRecord(xlogreader, curpoint, &errormsg); + if (prevpoint == 0) + prevpoint = record->xl_prev; if (record == NULL) { XLogRecPtr errptr; @@ -576,27 +579,29 @@ read_recovery_info(const char *archivedir, TimeLineID tli, uint32 wal_seg_size, else elog(ERROR, "Could not read WAL record at %X/%X", (uint32) (errptr >> 32), (uint32) (errptr)); + } - /* for compatibility with Pg < 13 */ - curpoint = InvalidXLogRecPtr; + /* for compatibility with Pg < 13 */ + curpoint = InvalidXLogRecPtr; - if (getRecordTimestamp(xlogreader, &last_time)) - { - *recovery_time = timestamptz_to_time_t(last_time); + if (getRecordTimestamp(xlogreader, &last_time)) + { + *recovery_time = timestamptz_to_time_t(last_time); - /* Found timestamp in WAL record 'record' */ - res = true; - } + /* Found timestamp in WAL record 'record' */ + res = true; } - } while (xlogreader->EndRecPtr < endpoint+1); + } while (xlogreader->EndRecPtr < endpoint); if (res) 
goto cleanup; /* Goto previous megabyte */ - endpoint = startpoint-1; - startpoint -= STEPBACK_CHUNK; - } while (startpoint + STEPBACK_CHUNK >= start_lsn); + endpoint = startpoint; + startpoint = prevpoint - (prevpoint % STEPBACK_CHUNK); + if (startpoint < start_lsn) + startpoint = start_lsn; + } while (endpoint > start_lsn); /* Didn't find timestamp from WAL records between start_lsn and stop_lsn */ res = false; From 2904405a5faf8801f22e47558d8bb71bbbdaa517 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Wed, 21 Dec 2022 11:19:34 +0300 Subject: [PATCH 301/339] [PBCKP-270] Added pio file functions for compatibility/walmethods.c --- src/compatibility/receivelog.c | 94 +++++---------- src/compatibility/receivelog.h | 5 - src/compatibility/streamutil.c | 1 + src/compatibility/walmethods.c | 209 ++++++++++++++------------------- src/compatibility/walmethods.h | 6 +- src/stream.c | 8 +- 6 files changed, 124 insertions(+), 199 deletions(-) diff --git a/src/compatibility/receivelog.c b/src/compatibility/receivelog.c index 5b32ae7bb..979b448eb 100644 --- a/src/compatibility/receivelog.c +++ b/src/compatibility/receivelog.c @@ -74,7 +74,7 @@ mark_file_as_archived(StreamCtl *stream, const char *fname) snprintf(tmppath, sizeof(tmppath), "archive_status/%s.done", fname); - f = stream->walmethod->open_for_write(tmppath, NULL, 0); + f = stream->walmethod->open_for_write(tmppath); if (f == NULL) { elog(ERROR, "could not create archive status file \"%s\": %s", @@ -113,8 +113,7 @@ open_walfile(StreamCtl *stream, XLogRecPtr startpoint) XLogFileName(current_walfile_name, stream->timeline, segno, WalSegSz); /* Note that this considers the compression used if necessary */ - fn = stream->walmethod->get_file_name(current_walfile_name, - stream->partial_suffix); + fn = stream->walmethod->get_file_name(current_walfile_name); /* * When streaming to files, if an existing file exists we verify that it's @@ -140,7 +139,7 @@ open_walfile(StreamCtl *stream, XLogRecPtr startpoint) if (size == 
WalSegSz) { /* Already padded file. Open it for use */ - f = stream->walmethod->open_for_write(current_walfile_name, stream->partial_suffix, 0); + f = stream->walmethod->open_for_write(current_walfile_name); if (f == NULL) { elog(ERROR, "could not open existing write-ahead log file \"%s\": %s", @@ -153,7 +152,7 @@ open_walfile(StreamCtl *stream, XLogRecPtr startpoint) if (stream->walmethod->sync(f) != 0) { elog(ERROR, "could not fsync existing write-ahead log file \"%s\": %s", - fn, stream->walmethod->getlasterror());//FATAL + fn, stream->walmethod->getlasterror()); stream->walmethod->close(f, CLOSE_UNLINK); exit(1); } @@ -179,8 +178,7 @@ open_walfile(StreamCtl *stream, XLogRecPtr startpoint) /* No file existed, so create one */ - f = stream->walmethod->open_for_write(current_walfile_name, - stream->partial_suffix, WalSegSz); + f = stream->walmethod->open_for_write(current_walfile_name); if (f == NULL) { elog(ERROR, "could not open write-ahead log file \"%s\": %s", @@ -218,20 +216,32 @@ close_walfile(StreamCtl *stream, XLogRecPtr pos) return false; } - - if (stream->partial_suffix) + /* + * Pad file to WalSegSz size by zero bytes + */ + if (currpos < WalSegSz) { - if (currpos == WalSegSz) - r = stream->walmethod->close(walfile, CLOSE_NORMAL); - else + char *tempbuf = pgut_malloc0(XLOG_BLCKSZ); + int needWrite = WalSegSz - currpos; + int cnt; + while (needWrite > 0) { - elog(INFO, "not renaming \"%s%s\", segment is not complete", - current_walfile_name, stream->partial_suffix); - r = stream->walmethod->close(walfile, CLOSE_NO_RENAME); + + cnt = needWrite > XLOG_BLCKSZ ? 
XLOG_BLCKSZ : needWrite; + if (stream->walmethod->write(walfile, tempbuf, cnt) != cnt) + { + elog(ERROR, "failed to append file \"%s\": %s", + current_walfile_name, stream->walmethod->getlasterror()); + stream->walmethod->close(walfile, CLOSE_NORMAL); + walfile = NULL; + pgut_free(tempbuf); + return false; + } + needWrite -= cnt; } + pgut_free(tempbuf); } - else - r = stream->walmethod->close(walfile, CLOSE_NORMAL); + r = stream->walmethod->close(walfile, CLOSE_NORMAL); walfile = NULL; @@ -242,19 +252,6 @@ close_walfile(StreamCtl *stream, XLogRecPtr pos) return false; } - /* - * Mark file as archived if requested by the caller - pg_basebackup needs - * to do so as files can otherwise get archived again after promotion of a - * new node. This is in line with walreceiver.c always doing a - * XLogArchiveForceDone() after a complete segment. - */ - if (currpos == WalSegSz && stream->mark_done) - { - /* writes error message if failed */ - if (!mark_file_as_archived(stream, current_walfile_name)) - return false; - } - lastFlushPosition = pos; return true; } @@ -299,7 +296,7 @@ writeTimeLineHistoryFile(StreamCtl *stream, char *filename, char *content) return false; } - f = stream->walmethod->open_for_write(histfname, ".tmp", 0); + f = stream->walmethod->open_for_write(histfname); if (f == NULL) { pg_log_error("could not create timeline history file \"%s\": %s", @@ -327,14 +324,6 @@ writeTimeLineHistoryFile(StreamCtl *stream, char *filename, char *content) return false; } - /* Maintain archive_status, check close_walfile() for details. 
*/ - if (stream->mark_done) - { - /* writes error message if failed */ - if (!mark_file_as_archived(stream, histfname)) - return false; - } - return true; } @@ -490,10 +479,6 @@ ReceiveXlogStream(PGconn *conn, StreamCtl *stream) } else { - if (stream->synchronous) - reportFlushPosition = true; - else - reportFlushPosition = false; slotcmd[0] = 0; } @@ -777,29 +762,6 @@ HandleCopyStream(PGconn *conn, StreamCtl *stream, now = feGetCurrentTimestamp(); - /* - * If synchronous option is true, issue sync command as soon as there - * are WAL data which has not been flushed yet. - */ - if (stream->synchronous && lastFlushPosition < blockpos && walfile != NULL) - { - if (stream->walmethod->sync(walfile) != 0) - { - pg_log_fatal("could not fsync file \"%s\": %s", - current_walfile_name, stream->walmethod->getlasterror()); - exit(1); - } - lastFlushPosition = blockpos; - - /* - * Send feedback so that the server sees the latest WAL locations - * immediately. - */ - if (!sendFeedback(conn, blockpos, now, false)) - goto error; - last_status = now; - } - /* * Potentially send a status message to the primary */ diff --git a/src/compatibility/receivelog.h b/src/compatibility/receivelog.h index e04333bf8..02dc6ebd2 100644 --- a/src/compatibility/receivelog.h +++ b/src/compatibility/receivelog.h @@ -33,10 +33,6 @@ typedef struct StreamCtl char *sysidentifier; /* Validate this system identifier and * timeline */ int standby_message_timeout; /* Send status messages this often */ - bool synchronous; /* Flush immediately WAL data on write */ - bool mark_done; /* Mark segment as done in generated archive */ - bool do_sync; /* Flush to disk to ensure consistent state of - * data */ stream_stop_callback stream_stop; /* Stop streaming when returns true */ @@ -44,7 +40,6 @@ typedef struct StreamCtl * and check stream_stop() when there is any */ WalWriteMethod *walmethod; /* How to write the WAL */ - char *partial_suffix; /* Suffix appended to partially received files */ char 
*replication_slot; /* Replication slot to use, or NULL */ } StreamCtl; diff --git a/src/compatibility/streamutil.c b/src/compatibility/streamutil.c index 187cc270e..14268503a 100644 --- a/src/compatibility/streamutil.c +++ b/src/compatibility/streamutil.c @@ -12,6 +12,7 @@ *------------------------------------------------------------------------- */ +#include "pg_probackup.h" #include "postgres_fe.h" #include diff --git a/src/compatibility/walmethods.c b/src/compatibility/walmethods.c index 29eb08f68..fe8cfc757 100644 --- a/src/compatibility/walmethods.c +++ b/src/compatibility/walmethods.c @@ -52,6 +52,7 @@ typedef struct DirectoryMethodData bool sync; const char *lasterrstring; /* if set, takes precedence over lasterrno */ int lasterrno; + pioDrive_i drive; } DirectoryMethodData; static DirectoryMethodData *dir_data = NULL; @@ -60,11 +61,11 @@ static DirectoryMethodData *dir_data = NULL; */ typedef struct DirectoryMethodFile { - int fd; + pioWriteCloser_i fd; off_t currpos; char *pathname; char *fullpath; - char *temp_suffix; + //char *temp_suffix;/* todo: remove temp_suffix - S3 not support rename, pioOpenRewrite ust temp files fot local file operations */ #ifdef HAVE_LIBZ gzFile gzfp; #endif @@ -84,31 +85,33 @@ dir_getlasterror(void) } static char * -dir_get_file_name(const char *pathname, const char *temp_suffix) +dir_get_file_name(const char *pathname) { char *filename = pg_malloc0(MAXPGPATH * sizeof(char)); - snprintf(filename, MAXPGPATH, "%s%s%s", - pathname, dir_data->compression > 0 ? ".gz" : "", - temp_suffix ? temp_suffix : ""); + snprintf(filename, MAXPGPATH, "%s%s", + pathname, dir_data->compression > 0 ? 
".gz" : ""); return filename; } static Walfile -dir_open_for_write(const char *pathname, const char *temp_suffix, size_t pad_to_size) +dir_open_for_write(const char *pathname) { + FOBJ_FUNC_ARP(); char tmppath[MAXPGPATH]; char *filename; - int fd; + pioWriteCloser_i fd; DirectoryMethodFile *f; + err_i err = $noerr(); + #ifdef HAVE_LIBZ gzFile gzfp = NULL; #endif dir_clear_error(); - filename = dir_get_file_name(pathname, temp_suffix); + filename = dir_get_file_name(pathname); snprintf(tmppath, sizeof(tmppath), "%s/%s", dir_data->basedir, filename); pg_free(filename); @@ -119,16 +122,17 @@ dir_open_for_write(const char *pathname, const char *temp_suffix, size_t pad_to_ * does not do any system calls to fsync() to make changes permanent on * disk. */ - fd = open(tmppath, O_WRONLY | O_CREAT | PG_BINARY, pg_file_create_mode); - if (fd < 0) + fd = $i(pioOpenRewrite, dir_data->drive, tmppath, O_WRONLY | O_CREAT | PG_BINARY, .err = &err); + if ($haserr(err)) { - dir_data->lasterrno = errno; + dir_data->lasterrno = getErrno(err); return NULL; } #ifdef HAVE_LIBZ if (dir_data->compression > 0) { + /* vvs gzfp = gzdopen(fd, "wb"); if (gzfp == NULL) { @@ -144,36 +148,10 @@ dir_open_for_write(const char *pathname, const char *temp_suffix, size_t pad_to_ gzclose(gzfp); return NULL; } + */ } #endif - /* Do pre-padding on non-compressed files */ - if (pad_to_size && dir_data->compression == 0) - { - PGAlignedXLogBlock zerobuf; - int bytes; - - memset(zerobuf.data, 0, XLOG_BLCKSZ); - for (bytes = 0; bytes < pad_to_size; bytes += XLOG_BLCKSZ) - { - errno = 0; - if (write(fd, zerobuf.data, XLOG_BLCKSZ) != XLOG_BLCKSZ) - { - /* If write didn't set errno, assume problem is no disk space */ - dir_data->lasterrno = errno ? 
errno : ENOSPC; - close(fd); - return NULL; - } - } - - if (lseek(fd, 0, SEEK_SET) != 0) - { - dir_data->lasterrno = errno; - close(fd); - return NULL; - } - } - /* * fsync WAL file and containing directory, to ensure the file is * persistently created and zeroed (if padded). That's particularly @@ -182,6 +160,15 @@ dir_open_for_write(const char *pathname, const char *temp_suffix, size_t pad_to_ */ if (dir_data->sync) { + err = $i(pioWriteFinish, fd); + + if ($haserr(err)) + { + dir_data->lasterrno =getErrno(err); + $i(pioClose, fd); + return NULL; + } + /* vvs if (fsync_fname_compat(tmppath, false) != 0 || fsync_parent_path_compat(tmppath) != 0) { @@ -194,6 +181,7 @@ dir_open_for_write(const char *pathname, const char *temp_suffix, size_t pad_to_ close(fd); return NULL; } + */ } f = pg_malloc0(sizeof(DirectoryMethodFile)); @@ -205,8 +193,6 @@ dir_open_for_write(const char *pathname, const char *temp_suffix, size_t pad_to_ f->currpos = 0; f->pathname = pg_strdup(pathname); f->fullpath = pg_strdup(tmppath); - if (temp_suffix) - f->temp_suffix = pg_strdup(temp_suffix); return f; } @@ -214,33 +200,40 @@ dir_open_for_write(const char *pathname, const char *temp_suffix, size_t pad_to_ static ssize_t dir_write(Walfile f, const void *buf, size_t count) { - ssize_t r; + FOBJ_FUNC_ARP(); + ssize_t r = 0; DirectoryMethodFile *df = (DirectoryMethodFile *) f; Assert(f != NULL); dir_clear_error(); + ft_bytes_t fBuf; + err_i err = $noerr(); #ifdef HAVE_LIBZ if (dir_data->compression > 0) { + /* vvs errno = 0; r = (ssize_t) gzwrite(df->gzfp, buf, count); if (r != count) { - /* If write didn't set errno, assume problem is no disk space */ dir_data->lasterrno = errno ? errno : ENOSPC; } + */ } else #endif { errno = 0; - r = write(df->fd, buf, count); - if (r != count) + fBuf = ft_bytes((void *)buf, count); + err = $i(pioWrite, df->fd, fBuf); + if ($haserr(err)) { - /* If write didn't set errno, assume problem is no disk space */ - dir_data->lasterrno = errno ? 
errno : ENOSPC; + dir_data->lasterrno = getErrno(err) ? getErrno(err) : ENOSPC; } + else r = count; + + } if (r > 0) df->currpos += r; @@ -260,10 +253,11 @@ dir_get_current_pos(Walfile f) static int dir_close(Walfile f, WalCloseMethod method) { - int r; + int r = 0; DirectoryMethodFile *df = (DirectoryMethodFile *) f; char tmppath[MAXPGPATH]; char tmppath2[MAXPGPATH]; + err_i err = $noerr(); Assert(f != NULL); dir_clear_error(); @@ -276,66 +270,31 @@ dir_close(Walfile f, WalCloseMethod method) } else #endif - r = close(df->fd); - - if (r == 0) + err = $i(pioClose, df->fd, dir_data->sync); + if ($haserr(err)) { - /* Build path to the current version of the file */ - if (method == CLOSE_NORMAL && df->temp_suffix) - { - char *filename; - char *filename2; - - /* - * If we have a temp prefix, normal operation is to rename the - * file. - */ - filename = dir_get_file_name(df->pathname, df->temp_suffix); - snprintf(tmppath, sizeof(tmppath), "%s/%s", - dir_data->basedir, filename); - pg_free(filename); - - /* permanent name, so no need for the prefix */ - filename2 = dir_get_file_name(df->pathname, NULL); - snprintf(tmppath2, sizeof(tmppath2), "%s/%s", - dir_data->basedir, filename2); - pg_free(filename2); - r = durable_rename_compat(tmppath, tmppath2); - } - else if (method == CLOSE_UNLINK) - { - char *filename; - - /* Unlink the file once it's closed */ - filename = dir_get_file_name(df->pathname, df->temp_suffix); - snprintf(tmppath, sizeof(tmppath), "%s/%s", - dir_data->basedir, filename); - pg_free(filename); - r = unlink(tmppath); - } - else - { - /* - * Else either CLOSE_NORMAL and no temp suffix, or - * CLOSE_NO_RENAME. In this case, fsync the file and containing - * directory if sync mode is requested. 
- */ - if (dir_data->sync) - { - r = fsync_fname_compat(df->fullpath, false); - if (r == 0) - r = fsync_parent_path_compat(df->fullpath); - } - } + dir_data->lasterrno = getErrno(err); + r = -1; + } + else if (method == CLOSE_UNLINK) + { + char *filename; + + /* Unlink the file once it's closed */ + filename = dir_get_file_name(df->pathname); + snprintf(tmppath, sizeof(tmppath), "%s/%s", + dir_data->basedir, filename); + pg_free(filename); + err = $i(pioRemove, dir_data->drive, tmppath); } - if (r != 0) - dir_data->lasterrno = errno; + if ($haserr(err)){ + dir_data->lasterrno = getErrno(err); + r = -1; + } pg_free(df->pathname); pg_free(df->fullpath); - if (df->temp_suffix) - pg_free(df->temp_suffix); pg_free(df); return r; @@ -344,8 +303,7 @@ dir_close(Walfile f, WalCloseMethod method) static int dir_sync(Walfile f) { - int r; - + err_i err = $noerr(); Assert(f != NULL); dir_clear_error(); @@ -363,28 +321,33 @@ dir_sync(Walfile f) } #endif - r = fsync(((DirectoryMethodFile *) f)->fd); - if (r < 0) - dir_data->lasterrno = errno; - return r; + err = $i(pioWriteFinish, ((DirectoryMethodFile *) f)->fd); + if ($haserr(err)) + { + dir_data->lasterrno = getErrno(err); + return -1; + } + return 0; } static ssize_t dir_get_file_size(const char *pathname) { - struct stat statbuf; + pio_stat_t statbuf; char tmppath[MAXPGPATH]; + err_i err = $noerr(); snprintf(tmppath, sizeof(tmppath), "%s/%s", dir_data->basedir, pathname); - if (stat(tmppath, &statbuf) != 0) + statbuf = $i(pioStat, dir_data->drive, .err = &err); + if ($haserr(err)) { - dir_data->lasterrno = errno; + dir_data->lasterrno = getErrno(err); return -1; } - return statbuf.st_size; + return statbuf.pst_size; } static int @@ -397,18 +360,23 @@ static bool dir_existsfile(const char *pathname) { char tmppath[MAXPGPATH]; - int fd; + bool ret; + + err_i err = $noerr(); dir_clear_error(); snprintf(tmppath, sizeof(tmppath), "%s/%s", dir_data->basedir, pathname); - fd = open(tmppath, O_RDONLY | PG_BINARY, 0); - if (fd < 0) 
- return false; - close(fd); - return true; + ret = $i(pioExists, dir_data->drive, .path = tmppath, .err = &err); + if ($haserr(err)) + { + dir_data->lasterrno = getErrno(err); + } + + return ret; + } static bool @@ -422,18 +390,20 @@ dir_finish(void) * Files are fsynced when they are closed, but we need to fsync the * directory entry here as well. */ + /* vvs temp if (fsync_fname_compat(dir_data->basedir, true) != 0) { dir_data->lasterrno = errno; return false; } + */ } return true; } WalWriteMethod * -CreateWalDirectoryMethod(const char *basedir, int compression, bool sync) +CreateWalDirectoryMethod(const char *basedir, int compression, bool sync, pioDrive_i drive) { WalWriteMethod *method; @@ -454,6 +424,7 @@ CreateWalDirectoryMethod(const char *basedir, int compression, bool sync) dir_data->compression = compression; dir_data->basedir = pg_strdup(basedir); dir_data->sync = sync; + dir_data->drive = drive; return method; } diff --git a/src/compatibility/walmethods.h b/src/compatibility/walmethods.h index e1e3aacf3..f20ade905 100644 --- a/src/compatibility/walmethods.h +++ b/src/compatibility/walmethods.h @@ -38,7 +38,7 @@ struct WalWriteMethod * automatically renamed in close(). If pad_to_size is specified, the file * will be padded with NUL up to that size, if supported by the Walmethod. */ - Walfile (*open_for_write) (const char *pathname, const char *temp_suffix, size_t pad_to_size); + Walfile (*open_for_write) (const char *pathname); /* * Close an open Walfile, using one or more methods for handling automatic @@ -56,7 +56,7 @@ struct WalWriteMethod * Return the name of the current file to work on in pg_malloc()'d string, * without the base directory. This is useful for logging. 
*/ - char *(*get_file_name) (const char *pathname, const char *temp_suffix); + char *(*get_file_name) (const char *pathname); /* Return the level of compression */ int (*compression) (void); @@ -95,7 +95,7 @@ struct WalWriteMethod * not all those required for pg_receivewal) */ WalWriteMethod *CreateWalDirectoryMethod(const char *basedir, - int compression, bool sync); + int compression, bool sync, pioDrive_i drive); /* Cleanup routines for previously-created methods */ void FreeWalDirectoryMethod(void); diff --git a/src/stream.c b/src/stream.c index 1b97a64d1..d3797d100 100644 --- a/src/stream.c +++ b/src/stream.c @@ -242,19 +242,15 @@ StreamLog(void *arg) ctl.sysidentifier = NULL; ctl.stream_stop = stop_streaming; ctl.standby_message_timeout = standby_message_timeout; - ctl.partial_suffix = NULL; - ctl.synchronous = false; - ctl.mark_done = false; ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, 0, - false); + false, + pioDriveForLocation(FIO_BACKUP_HOST)); ctl.replication_slot = replication_slot; ctl.stop_socket = PGINVALID_SOCKET; - ctl.do_sync = false; /* We sync all files at the end of backup */ -// ctl.mark_done /* for future use in s3 */ if (ReceiveXlogStream(stream_arg->conn, &ctl) == false) { From 8b46c8af6379155b10fbb0bfb74f9d8a48f7c2fd Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Thu, 22 Dec 2022 12:35:00 +0300 Subject: [PATCH 302/339] [PBCKP-270] Fixed some functions for WAL stream --- src/compatibility/receivelog.c | 32 +++----------------------------- src/compatibility/walmethods.c | 11 +++++------ src/compatibility/walmethods.h | 2 +- src/stream.c | 1 + 4 files changed, 10 insertions(+), 36 deletions(-) diff --git a/src/compatibility/receivelog.c b/src/compatibility/receivelog.c index 979b448eb..207d12ef0 100644 --- a/src/compatibility/receivelog.c +++ b/src/compatibility/receivelog.c @@ -65,32 +65,6 @@ static long CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_ti static bool ReadEndOfStreamingResult(PGresult 
*res, XLogRecPtr *startpos, uint32 *timeline); -static bool -mark_file_as_archived(StreamCtl *stream, const char *fname) -{ - Walfile *f; - static char tmppath[MAXPGPATH]; - - snprintf(tmppath, sizeof(tmppath), "archive_status/%s.done", - fname); - - f = stream->walmethod->open_for_write(tmppath); - if (f == NULL) - { - elog(ERROR, "could not create archive status file \"%s\": %s", - tmppath, stream->walmethod->getlasterror()); - return false; - } - - if (stream->walmethod->close(f, CLOSE_NORMAL) != 0) - { - elog(ERROR, "could not close archive status file \"%s\": %s", - tmppath, stream->walmethod->getlasterror()); - return false; - } - - return true; -} /* * Open a new WAL file in the specified directory. @@ -139,7 +113,7 @@ open_walfile(StreamCtl *stream, XLogRecPtr startpoint) if (size == WalSegSz) { /* Already padded file. Open it for use */ - f = stream->walmethod->open_for_write(current_walfile_name); + f = stream->walmethod->open_for_write(current_walfile_name, false); if (f == NULL) { elog(ERROR, "could not open existing write-ahead log file \"%s\": %s", @@ -178,7 +152,7 @@ open_walfile(StreamCtl *stream, XLogRecPtr startpoint) /* No file existed, so create one */ - f = stream->walmethod->open_for_write(current_walfile_name); + f = stream->walmethod->open_for_write(current_walfile_name, false); if (f == NULL) { elog(ERROR, "could not open write-ahead log file \"%s\": %s", @@ -296,7 +270,7 @@ writeTimeLineHistoryFile(StreamCtl *stream, char *filename, char *content) return false; } - f = stream->walmethod->open_for_write(histfname); + f = stream->walmethod->open_for_write(histfname, true); if (f == NULL) { pg_log_error("could not create timeline history file \"%s\": %s", diff --git a/src/compatibility/walmethods.c b/src/compatibility/walmethods.c index fe8cfc757..cced49910 100644 --- a/src/compatibility/walmethods.c +++ b/src/compatibility/walmethods.c @@ -65,7 +65,6 @@ typedef struct DirectoryMethodFile off_t currpos; char *pathname; char *fullpath; - 
//char *temp_suffix;/* todo: remove temp_suffix - S3 not support rename, pioOpenRewrite ust temp files fot local file operations */ #ifdef HAVE_LIBZ gzFile gzfp; #endif @@ -96,7 +95,7 @@ dir_get_file_name(const char *pathname) } static Walfile -dir_open_for_write(const char *pathname) +dir_open_for_write(const char *pathname, bool use_temp) { FOBJ_FUNC_ARP(); char tmppath[MAXPGPATH]; @@ -122,7 +121,7 @@ dir_open_for_write(const char *pathname) * does not do any system calls to fsync() to make changes permanent on * disk. */ - fd = $i(pioOpenRewrite, dir_data->drive, tmppath, O_WRONLY | O_CREAT | PG_BINARY, .err = &err); + fd = $i(pioOpenRewrite, dir_data->drive, tmppath, .err = &err, .use_temp = use_temp); if ($haserr(err)) { dir_data->lasterrno = getErrno(err); @@ -189,7 +188,7 @@ dir_open_for_write(const char *pathname) if (dir_data->compression > 0) f->gzfp = gzfp; #endif - f->fd = fd; + f->fd = $iref(fd); f->currpos = 0; f->pathname = pg_strdup(pathname); f->fullpath = pg_strdup(tmppath); @@ -256,7 +255,6 @@ dir_close(Walfile f, WalCloseMethod method) int r = 0; DirectoryMethodFile *df = (DirectoryMethodFile *) f; char tmppath[MAXPGPATH]; - char tmppath2[MAXPGPATH]; err_i err = $noerr(); Assert(f != NULL); @@ -270,7 +268,7 @@ dir_close(Walfile f, WalCloseMethod method) } else #endif - err = $i(pioClose, df->fd, dir_data->sync); + err = $i(pioClose, df->fd); if ($haserr(err)) { dir_data->lasterrno = getErrno(err); @@ -295,6 +293,7 @@ dir_close(Walfile f, WalCloseMethod method) pg_free(df->pathname); pg_free(df->fullpath); + $idel(&df->fd); pg_free(df); return r; diff --git a/src/compatibility/walmethods.h b/src/compatibility/walmethods.h index f20ade905..e7d016bb3 100644 --- a/src/compatibility/walmethods.h +++ b/src/compatibility/walmethods.h @@ -38,7 +38,7 @@ struct WalWriteMethod * automatically renamed in close(). If pad_to_size is specified, the file * will be padded with NUL up to that size, if supported by the Walmethod. 
*/ - Walfile (*open_for_write) (const char *pathname); + Walfile (*open_for_write) (const char *pathname, bool use_temp); /* * Close an open Walfile, using one or more methods for handling automatic diff --git a/src/stream.c b/src/stream.c index d3797d100..342e7c4c3 100644 --- a/src/stream.c +++ b/src/stream.c @@ -198,6 +198,7 @@ CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *pl static void * StreamLog(void *arg) { + FOBJ_FUNC_ARP(); StreamThreadArg *stream_arg = (StreamThreadArg *) arg; /* From 71e6895935d1821fb68e18adfbb5c494a4822f50 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Thu, 22 Dec 2022 22:14:45 +0300 Subject: [PATCH 303/339] [PBCKP-270] Updated wait_wal_lsn function for begin backup without wal file for streaming WAL. --- src/backup.c | 10 ++++++++++ src/compatibility/receivelog.c | 6 ++++++ src/compatibility/walmethods.h | 1 + src/pg_probackup.h | 1 + 4 files changed, 18 insertions(+) diff --git a/src/backup.c b/src/backup.c index 8cb0eaed4..d693773c4 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1327,6 +1327,16 @@ wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_l } else elog(LOG, "Found WAL segment: %s", wal_segment_path); + + + /* Check current file for stream. 
It may be not exist in S3 */ + if (!file_exists && segment_only && is_start_lsn && in_stream_dir && try_count > 1) + { + if( isStreamProccessed(wal_segment)) + return InvalidXLogRecPtr; + + } + } if (file_exists) diff --git a/src/compatibility/receivelog.c b/src/compatibility/receivelog.c index 207d12ef0..5cdb568ba 100644 --- a/src/compatibility/receivelog.c +++ b/src/compatibility/receivelog.c @@ -1222,3 +1222,9 @@ CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout, return sleeptime; } + + +bool isStreamProccessed(char *seg_filename) +{ + return still_sending && !strcmp(current_walfile_name, seg_filename); +} \ No newline at end of file diff --git a/src/compatibility/walmethods.h b/src/compatibility/walmethods.h index e7d016bb3..3aab03ec1 100644 --- a/src/compatibility/walmethods.h +++ b/src/compatibility/walmethods.h @@ -85,6 +85,7 @@ struct WalWriteMethod /* Return a text for the last error in this Walfile */ const char *(*getlasterror) (void); + }; /* diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 0a60ee57c..ac9875c47 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1221,6 +1221,7 @@ extern XLogRecPtr wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr lsn, bool extern void wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBackup *backup); extern int64 calculate_datasize_of_filelist(parray *filelist); +extern bool isStreamProccessed(char *seg_filename); /* Checks, that this file is stream processing. File name without path. 
*/ /* * Slices and arrays for C strings From 3e75df6db5358f05d02c27eedb32ce5d0e90c19c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 16:24:22 +0300 Subject: [PATCH 304/339] [PBCKP-270] fixes --- src/compatibility/walmethods.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/compatibility/walmethods.c b/src/compatibility/walmethods.c index cced49910..e6f9edd0e 100644 --- a/src/compatibility/walmethods.c +++ b/src/compatibility/walmethods.c @@ -283,7 +283,7 @@ dir_close(Walfile f, WalCloseMethod method) snprintf(tmppath, sizeof(tmppath), "%s/%s", dir_data->basedir, filename); pg_free(filename); - err = $i(pioRemove, dir_data->drive, tmppath); + err = $i(pioRemove, dir_data->drive, tmppath, .missing_ok = false); } if ($haserr(err)){ @@ -339,7 +339,8 @@ dir_get_file_size(const char *pathname) snprintf(tmppath, sizeof(tmppath), "%s/%s", dir_data->basedir, pathname); - statbuf = $i(pioStat, dir_data->drive, .err = &err); + statbuf = $i(pioStat, dir_data->drive, .path = pathname, + .follow_symlink = false, .err = &err); if ($haserr(err)) { dir_data->lasterrno = getErrno(err); From c3430a685f86297e743d94bc8ce5f530f9ba0510 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 16:03:10 +0300 Subject: [PATCH 305/339] [PBCKP-270] fetch current log position directly from streamer. We have streamer in our hand, so we may ask it directly, what are it streaming now. But in case we looks into previous segment, we fallback to log reading since it is already flushed. 
--- src/backup.c | 54 +++++++++++++++++++++++++++------- src/compatibility/receivelog.c | 13 ++++---- src/compatibility/receivelog.h | 4 ++- src/pg_probackup.h | 2 +- src/stream.c | 34 ++++++++++++--------- 5 files changed, 74 insertions(+), 33 deletions(-) diff --git a/src/backup.c b/src/backup.c index d693773c4..04abd37f1 100644 --- a/src/backup.c +++ b/src/backup.c @@ -1304,6 +1304,50 @@ wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_l elog(LOG, "Looking for LSN %X/%X in segment: %s", (uint32) (target_lsn >> 32), (uint32) target_lsn, wal_segment); + if (in_stream_dir && !in_prev_segment) + { + /* separate simple loop for streaming */ + for (;;) + { + TimeLineID curtli; + XLogRecPtr curptr; + XLogRecPtr prevptr; + + getCurrentStreamPosition(&curtli, &curptr, &prevptr); + if (curtli > tli || (curtli == tli && curptr > target_lsn)) + return target_lsn; + + sleep(1); + if (interrupted || thread_interrupted) + elog(ERROR, "Interrupted during waiting for WAL streaming"); + try_count++; + + /* Inform user if WAL segment is absent in first attempt */ + if (try_count == 1) + { + if (segment_only) + elog(INFO, "Wait for WAL segment %s to be %s", + wal_segment_path, wal_delivery_str); + else + elog(INFO, "Wait for LSN %X/%X in %s WAL segment %s", + (uint32) (target_lsn >> 32), (uint32) target_lsn, + wal_delivery_str, wal_segment_path); + } + + if (current.from_replica && + (XRecOffIsNull(target_lsn) || try_count > timeout / 2)) + { + if (!XLogRecPtrIsInvalid(prevptr)) + { + /* LSN of the prior record was found */ + elog(LOG, "Abuse prior LSN from stream: %X/%X", + (uint32) (prevptr >> 32), (uint32) prevptr); + return prevptr; + } + } + } + } + #ifdef HAVE_LIBZ snprintf(gz_wal_segment_path, sizeof(gz_wal_segment_path), "%s.gz", wal_segment_path); @@ -1327,16 +1371,6 @@ wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_l } else elog(LOG, "Found WAL segment: %s", wal_segment_path); - - - /* Check current file for 
stream. It may be not exist in S3 */ - if (!file_exists && segment_only && is_start_lsn && in_stream_dir && try_count > 1) - { - if( isStreamProccessed(wal_segment)) - return InvalidXLogRecPtr; - - } - } if (file_exists) diff --git a/src/compatibility/receivelog.c b/src/compatibility/receivelog.c index 5cdb568ba..3d3d3c6c9 100644 --- a/src/compatibility/receivelog.c +++ b/src/compatibility/receivelog.c @@ -495,6 +495,8 @@ ReceiveXlogStream(PGconn *conn, StreamCtl *stream) * responsibility that that's sane. */ lastFlushPosition = stream->startpos; + stream->currentpos = 0; + stream->prevpos = 0; while (1) { @@ -779,7 +781,10 @@ HandleCopyStream(PGconn *conn, StreamCtl *stream, } else if (copybuf[0] == 'w') { - if (!ProcessXLogDataMsg(conn, stream, copybuf, r, &blockpos)) + bool ok = ProcessXLogDataMsg(conn, stream, copybuf, r, &blockpos); + stream->prevpos = stream->currentpos; + stream->currentpos = blockpos; + if (!ok) goto error; /* @@ -1221,10 +1226,4 @@ CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout, sleeptime = -1; return sleeptime; -} - - -bool isStreamProccessed(char *seg_filename) -{ - return still_sending && !strcmp(current_walfile_name, seg_filename); } \ No newline at end of file diff --git a/src/compatibility/receivelog.h b/src/compatibility/receivelog.h index 02dc6ebd2..e4bec59e5 100644 --- a/src/compatibility/receivelog.h +++ b/src/compatibility/receivelog.h @@ -29,7 +29,9 @@ typedef bool (*stream_stop_callback) (XLogRecPtr segendpos, uint32 timeline, boo typedef struct StreamCtl { XLogRecPtr startpos; /* Start position for streaming */ - TimeLineID timeline; /* Timeline to stream data from */ + volatile XLogRecPtr currentpos; /* current position */ + volatile XLogRecPtr prevpos; /* current position */ + volatile TimeLineID timeline; /* Timeline to stream data from */ char *sysidentifier; /* Validate this system identifier and * timeline */ int standby_message_timeout; /* Send status messages this often */ diff --git 
a/src/pg_probackup.h b/src/pg_probackup.h index ac9875c47..e1137746e 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1221,7 +1221,7 @@ extern XLogRecPtr wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr lsn, bool extern void wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBackup *backup); extern int64 calculate_datasize_of_filelist(parray *filelist); -extern bool isStreamProccessed(char *seg_filename); /* Checks, that this file is stream processing. File name without path. */ +extern void getCurrentStreamPosition(TimeLineID *timeline, XLogRecPtr *ptr, XLogRecPtr *prev); /* * Slices and arrays for C strings diff --git a/src/stream.c b/src/stream.c index 342e7c4c3..157da740b 100644 --- a/src/stream.c +++ b/src/stream.c @@ -37,6 +37,8 @@ static uint32 stream_stop_timeout = 0; /* Time in which we started to wait for streaming end */ static time_t stream_stop_begin = 0; +static StreamCtl stream_ctl = {0}; + /* * We need to wait end of WAL streaming before execute pg_stop_backup(). 
*/ @@ -234,32 +236,28 @@ StreamLog(void *arg) stream_arg->starttli); { - StreamCtl ctl; - - MemSet(&ctl, 0, sizeof(ctl)); + stream_ctl.startpos = stream_arg->startpos; + stream_ctl.timeline = stream_arg->starttli; + stream_ctl.sysidentifier = NULL; + stream_ctl.stream_stop = stop_streaming; + stream_ctl.standby_message_timeout = standby_message_timeout; - ctl.startpos = stream_arg->startpos; - ctl.timeline = stream_arg->starttli; - ctl.sysidentifier = NULL; - ctl.stream_stop = stop_streaming; - ctl.standby_message_timeout = standby_message_timeout; - - ctl.walmethod = CreateWalDirectoryMethod( + stream_ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, 0, false, pioDriveForLocation(FIO_BACKUP_HOST)); - ctl.replication_slot = replication_slot; - ctl.stop_socket = PGINVALID_SOCKET; + stream_ctl.replication_slot = replication_slot; + stream_ctl.stop_socket = PGINVALID_SOCKET; - if (ReceiveXlogStream(stream_arg->conn, &ctl) == false) + if (ReceiveXlogStream(stream_arg->conn, &stream_ctl) == false) { interrupted = true; elog(ERROR, "Problem in receivexlog"); } - if (!ctl.walmethod->finish()) + if (!stream_ctl.walmethod->finish()) { interrupted = true; elog(ERROR, "Could not finish writing WAL files: %s", @@ -704,3 +702,11 @@ add_history_file_to_filelist(parray *filelist, uint32 timeline, char *basedir) file = pgFileNew(fullpath, relpath, false, do_crc, drive); parray_append(filelist, file); } + +void +getCurrentStreamPosition(TimeLineID *timeline, XLogRecPtr *ptr, XLogRecPtr *prev) +{ + *ptr = stream_ctl.currentpos; + *prev = stream_ctl.prevpos; + *timeline = stream_ctl.timeline; +} From b70d76ec4abbcd71504ddfc8b09c9fb00833ca5a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 23 Dec 2022 17:25:17 +0300 Subject: [PATCH 306/339] fix warning --- src/catalog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/catalog.c b/src/catalog.c index 1b1caab73..22a84e00a 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -76,7 +76,7 @@ 
timelineInfoFree(void *tliInfo) { timelineInfo *tli = (timelineInfo *) tliInfo; - parray_walk(tli->xlog_filelist, xlogFile_free); + parray_walk(tli->xlog_filelist, (void(*)(void*))xlogFile_free); parray_free(tli->xlog_filelist); if (tli->backups) From cfcba44f971b0d8adf480b6c4b5e6c06da5f675e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 24 Dec 2022 03:23:55 +0300 Subject: [PATCH 307/339] [PBCKP-330] fix test_archive_replica_not_null_offset postgrespro enterprise has dirrerent xlog record size due to 64bit xid. ee15 changed this size again. --- tests/replica_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/replica_test.py b/tests/replica_test.py index d523ea20b..2cb29ce20 100644 --- a/tests/replica_test.py +++ b/tests/replica_test.py @@ -864,13 +864,13 @@ def test_archive_replica_not_null_offset(self): # pgproee -- 0/4000078 self.assertRegex( e.message, - r'LOG: Looking for LSN (0/4000060|0/4000078) in segment: 000000010000000000000004', + r'LOG: Looking for LSN (0/4000060|0/4000078|0/4000070) in segment: 000000010000000000000004', "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) self.assertRegex( e.message, - r'INFO: Wait for LSN (0/4000060|0/4000078) in archived WAL segment', + r'INFO: Wait for LSN (0/4000060|0/4000078|0/4000070) in archived WAL segment', "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) From 4ddf8a1dfec691beef3f334892dd026bfcab93f0 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 24 Dec 2022 04:06:23 +0300 Subject: [PATCH 308/339] speedup test_archive_replica_not_null_offset --- tests/replica_test.py | 50 +++++++++++++++++-------------------------- 1 file changed, 20 insertions(+), 30 deletions(-) diff --git a/tests/replica_test.py b/tests/replica_test.py index 2cb29ce20..06db62032 100644 --- a/tests/replica_test.py +++ b/tests/replica_test.py @@ -841,44 +841,34 @@ def test_archive_replica_not_null_offset(self): self.backup_node( 
backup_dir, 'node', replica, replica.data_dir, options=[ - '--archive-timeout=30', - '--log-level-console=LOG', + '--archive-timeout=10', '--no-validate'], return_id=False) - try: + with self.assertRaises(ProbackupException) as ctx: self.backup_node( backup_dir, 'node', replica, replica.data_dir, options=[ - '--archive-timeout=30', + '--archive-timeout=10', '--log-level-console=LOG', '--no-validate']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of archive timeout. " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - # vanilla -- 0/4000060 - # pgproee -- 0/4000078 - self.assertRegex( - e.message, - r'LOG: Looking for LSN (0/4000060|0/4000078|0/4000070) in segment: 000000010000000000000004', - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertRegex( - e.message, - r'INFO: Wait for LSN (0/4000060|0/4000078|0/4000070) in archived WAL segment', - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - self.assertIn( - 'ERROR: WAL segment 000000010000000000000004 could not be archived in 30 seconds', - e.message, - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + e = ctx.exception + # vanilla -- 0/4000060 + # pgproee -- 0/4000078 + self.assertRegex( + e.message, + r'LOG: Looking for LSN (0/4000060|0/4000078|0/4000070) in segment: 000000010000000000000004', + "\n CMD: {0}".format(self.cmd)) + + self.assertRegex( + e.message, + r'INFO: Wait for LSN (0/4000060|0/4000078|0/4000070) in archived WAL segment', + "\n CMD: {0}".format(self.cmd)) + + self.assertRegex( + e.message, + r'ERROR: WAL segment 000000010000000000000004 could not be archived in \d+ seconds', + "\n CMD: {0}".format(self.cmd)) # @unittest.skip("skip") def test_replica_toast(self): From 12b8bfbc4cda482a342a9e608415dfa221f0f7ce Mon Sep 17 00:00:00 2001 From: 
Yura Sokolov Date: Sat, 24 Dec 2022 04:45:02 +0300 Subject: [PATCH 309/339] try to speedup test_replica_archive_page_backup --- tests/replica_test.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/replica_test.py b/tests/replica_test.py index 06db62032..9d48a5a04 100644 --- a/tests/replica_test.py +++ b/tests/replica_test.py @@ -1,4 +1,5 @@ import os +import threading import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack from datetime import datetime, timedelta @@ -258,9 +259,12 @@ def test_replica_archive_page_backup(self): self.wait_until_replica_catch_with_master(master, replica) + tm = threading.Timer(5, call_repeat, [1000, master.execute, 'select txid_current()']) + tm.start() backup_id = self.backup_node( backup_dir, 'replica', replica, options=['--archive-timeout=60']) + tm.join() self.validate_pb(backup_dir, 'replica') self.assertEqual( @@ -287,12 +291,12 @@ def test_replica_archive_page_backup(self): master.pgbench_init(scale=5) pgbench = master.pgbench( - options=['-T', '30', '-c', '2', '--no-vacuum']) + options=['-T', '10', '-c', '2', '--no-vacuum']) backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='page', - options=['--archive-timeout=60']) + options=['--archive-timeout=10']) pgbench.wait() @@ -1555,6 +1559,9 @@ def test_replica_via_basebackup(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start(replica=True) +def call_repeat(times, func, *args): + for i in range(times): + func(*args) # TODO: # null offset STOP LSN and latest record in previous segment is conrecord (manual only) # archiving from promoted delayed replica From ffc856609737275c12465a29cfccdad7a9fd892a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 24 Dec 2022 04:57:10 +0300 Subject: [PATCH 310/339] [PBCKP-428] read_recovery_info don't need XRecOffIsValid(stop_lsn) since it uses XLogFindNextRecord, it could use stop_lsn with no valid 
offset. In fact, it uses truncated stop_lsn with invalid offset always. Also fix getting timestamp from possible record in a future. We could not to not enter the loop since we need record->xl_prev to correctly walk backward. So we need to add condition before getRecordTimestamp. --- src/parsexlog.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/parsexlog.c b/src/parsexlog.c index 83aba3d52..0f2e0eaf5 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -540,10 +540,6 @@ read_recovery_info(const char *archivedir, TimeLineID tli, uint32 wal_seg_size, elog(ERROR, "Invalid start_lsn value %X/%X", (uint32) (start_lsn >> 32), (uint32) (start_lsn)); - if (!XRecOffIsValid(stop_lsn)) - elog(ERROR, "Invalid stop_lsn value %X/%X", - (uint32) (stop_lsn >> 32), (uint32) (stop_lsn)); - xlogreader = InitXLogPageRead(&reader_data, archivedir, tli, wal_seg_size, false, true, true); @@ -581,16 +577,16 @@ read_recovery_info(const char *archivedir, TimeLineID tli, uint32 wal_seg_size, (uint32) (errptr >> 32), (uint32) (errptr)); } - /* for compatibility with Pg < 13 */ - curpoint = InvalidXLogRecPtr; - - if (getRecordTimestamp(xlogreader, &last_time)) + if (curpoint < endpoint && getRecordTimestamp(xlogreader, &last_time)) { *recovery_time = timestamptz_to_time_t(last_time); /* Found timestamp in WAL record 'record' */ res = true; } + + /* for compatibility with Pg < 13 */ + curpoint = InvalidXLogRecPtr; } while (xlogreader->EndRecPtr < endpoint); if (res) From a65902c2e08b7be46cf78b5ec044b3da302e3c90 Mon Sep 17 00:00:00 2001 From: Victor Spirin Date: Sat, 24 Dec 2022 14:50:14 +0300 Subject: [PATCH 311/339] [PBCKP-365] Fixed test help_6 test. Added check_locale function for check that locale is installed. 
--- tests/option_test.py | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/tests/option_test.py b/tests/option_test.py index eec1bab44..af4b12b71 100644 --- a/tests/option_test.py +++ b/tests/option_test.py @@ -3,7 +3,6 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException import locale - class OptionTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -220,12 +219,28 @@ def test_options_5(self): def test_help_6(self): """help options""" if ProbackupTest.enable_nls: - self.test_env['LC_ALL'] = 'ru_RU.utf-8' - with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: - self.assertEqual( - self.run_pb(["--help"]), - help_out.read().decode("utf-8") - ) + if check_locale('ru_RU.utf-8'): + self.test_env['LC_ALL'] = 'ru_RU.utf-8' + with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: + self.assertEqual( + self.run_pb(["--help"]), + help_out.read().decode("utf-8") + ) + else: + self.skipTest( + "Locale ru_RU.utf-8 doesn't work. 
You need install ru_RU.utf-8 locale for this test") else: self.skipTest( 'You need configure PostgreSQL with --enabled-nls option for this test') + + +def check_locale(locale_name): + ret=True + old_locale = locale.setlocale(locale.LC_CTYPE,"") + try: + locale.setlocale(locale.LC_CTYPE, locale_name) + except locale.Error: + ret=False + finally: + locale.setlocale(locale.LC_CTYPE, old_locale) + return ret From 21c811c87ae992d958744ad1fe0a9f93a67a6271 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 25 Dec 2022 03:50:25 +0300 Subject: [PATCH 312/339] [PBCKP-365] jentlier --- tests/option_test.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/option_test.py b/tests/option_test.py index af4b12b71..d56ada413 100644 --- a/tests/option_test.py +++ b/tests/option_test.py @@ -220,10 +220,11 @@ def test_help_6(self): """help options""" if ProbackupTest.enable_nls: if check_locale('ru_RU.utf-8'): - self.test_env['LC_ALL'] = 'ru_RU.utf-8' + env = self.test_env.copy() + env['LC_MESSAGES'] = 'ru_RU.utf-8' with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: self.assertEqual( - self.run_pb(["--help"]), + self.run_pb(["--help"], env=env), help_out.read().decode("utf-8") ) else: From 6a9850be628c452c5fe8be9f56ae3bb91cc322c6 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 25 Dec 2022 03:52:01 +0300 Subject: [PATCH 313/339] fix test_options_5 --- tests/option_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/option_test.py b/tests/option_test.py index d56ada413..1a6baf48a 100644 --- a/tests/option_test.py +++ b/tests/option_test.py @@ -135,9 +135,9 @@ def test_options_5(self): self.assertEqual(1, 0, "Expecting Error because of garbage in pg_probackup.conf.\n Output: {0} \n CMD: {1}".format( repr(self.output), self.cmd)) except ProbackupException as e: - self.assertIn( - 'ERROR: Syntax error in " = INFINITE', + self.assertRegex( e.message, + 'ERROR: Syntax error .* 
INFINITE', '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) self.clean_pb(backup_dir) From b0182bd5ed4bfef2a4643d0b19ce440f364fa8ac Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 25 Dec 2022 03:57:51 +0300 Subject: [PATCH 314/339] fix test_pgpro560_control_file_loss --- tests/pgpro560_test.py | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/tests/pgpro560_test.py b/tests/pgpro560_test.py index 416c449c3..cd2920a51 100644 --- a/tests/pgpro560_test.py +++ b/tests/pgpro560_test.py @@ -31,19 +31,9 @@ def test_pgpro560_control_file_loss(self): # Not delete this file permanently os.rename(file, os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy')) - try: + with self.assertRaisesRegex(ProbackupException, + r'ERROR: Getting system identifier:.*pg_control'): self.backup_node(backup_dir, 'node', node, options=['--stream']) - # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because pg_control was deleted.\n " - "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'ERROR: Could not open file' in e.message and - 'pg_control' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) # Return this file to avoid Postger fail os.rename(os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy'), file) From dc9cfb86b376124bb6bc8e236872abbbaf4647a5 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 25 Dec 2022 19:13:35 +0300 Subject: [PATCH 315/339] fix pioRemoteDrive_pioOpenRewrite --- src/utils/file.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/utils/file.c b/src/utils/file.c index acceed4c8..714d05fa9 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3872,6 +3872,8 @@ pioRemoteDrive_pioOpenRewrite(VSelf, path_t path, int permissions, fobj_t fl; int handle = find_free_handle(); + fobj_reset_err(err); + 
fio_header hdr = { .cop = PIO_OPEN_REWRITE, .handle = handle, From 79b8755b62885b673232c5f5062146f9e98d85a3 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 26 Dec 2022 11:59:03 +0300 Subject: [PATCH 316/339] fix test_checkdb_checkunique --- tests/checkdb_test.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/checkdb_test.py b/tests/checkdb_test.py index 738f6c64d..b8fb59483 100644 --- a/tests/checkdb_test.py +++ b/tests/checkdb_test.py @@ -537,9 +537,14 @@ def test_checkdb_checkunique(self): repr(e.message), self.cmd)) self.assertIn( - "Amcheck failed in database 'postgres' for index: 'public.bttest_unique_idx': ERROR: index \"bttest_unique_idx\" is corrupted. There are tuples violating UNIQUE constraint", + "Amcheck failed in database 'postgres' for index: 'public.bttest_unique_idx'", e.message) + self.assertRegex( + e.message, + r"ERROR:[^\n]*(violating UNIQUE constraint|uniqueness is violated)" + ) + # Clean after yourself node.stop() From 8952b84b83dfa1ffce75bc43e7da66de32ca569c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 25 Dec 2022 21:26:42 +0300 Subject: [PATCH 317/339] [PBCKP-434] greatly simplify stop_lsn handling It was long believed `stop_lsn` should point on start of last record. But in fact it points to end of last record (because XLogInsert(BACKUP_END) returns pointer to end of record, and that is what pg_backup_stop/ pg_stop_backup returns). So we don't need to lookup `stop_lsn` record in WAL log, but rather check existence of record which ends on `stop_lsn` (which is what `get_prior_record_lsn` does). And there is no need to raise error on `stop_lsn` which points on block end - it is very valid when xlog record ends at block end. So: - we simplify wait_wal_lsn to just wait for `get_prior_record_lsn` returns "ok" (ie find such previous record). But now we don't overwrite stop_lsn with lsn of this record. - we use `wait_wal_lsn` only in "ARCHIVE" mode/directory. So get rid of `in_stream_dir` argument. 
- and `wait_wal_lsn` now waits only for previous record even for `is_start_lsn`, since there is no much gain in waiting for it. Even for PAGE mode we need only records before `start_lsn`, since `start_lsn` record is considered as part of backup. - "STREAM" mode now waits for lsn by hooking streamer. We don't need to calculate stop_backup_lsn since we may use stop_backup_result.lsn directly to stop streamer. After streamer stopped, we just check wal with `get_prior_record_lsn`. We don't need to, but just for sanity. - therefore `wait_wal_and_calculate_stop_lsn` become obsolete. Lets get rid of it. Without it, there is no need in get_first_record_lsn and get_next_record_lsn. Lets delete them too. - Instead of using XRecOffIsValid and XRecOffIsNull use added XRecPtrLooksGood and XRecEndLooksGood. XRecPtrLooksGood validates record start more rigidly than XRecOffIsValid, and XRecEndLooksGood validates record end, which could be on block end. - In fact, some of `XRecOffIsValid(x)` had to be `!XLogRecPtrIsInvalid(x)` - Since stop_lsn is not inclusive now, RunXLogThreads doesn't need `inclusive_endpoint` parameter. (And because Page mode needs just till start_lsn) - PAGE_LSN_FROM_FUTURE must be considered as error, btw. (And by the way there's no need to XLogBeginRead before XLogFindNextRecord. Cleanup such calls) --- src/backup.c | 296 ++++++---------------------- src/catalog.c | 16 +- src/catchup.c | 40 ++-- src/compatibility/receivelog.c | 8 +- src/compatibility/receivelog.h | 3 +- src/data.c | 1 + src/parsexlog.c | 342 +++++++-------------------------- src/pg_probackup.h | 46 +++-- src/stream.c | 82 ++++++-- 9 files changed, 259 insertions(+), 575 deletions(-) diff --git a/src/backup.c b/src/backup.c index 04abd37f1..66b1a2201 100644 --- a/src/backup.c +++ b/src/backup.c @@ -248,7 +248,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, * Because WAL streaming will start after pg_start_backup() in stream * mode. 
*/ - wait_wal_lsn(instanceState->instance_wal_subdir_path, current.start_lsn, true, current.tli, false, true, ERROR, false); + wait_wal_lsn(instanceState->instance_wal_subdir_path, current.start_lsn, true, current.tli, true, ERROR); } /* start stream replication */ @@ -271,7 +271,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, * PAGE backup in stream mode is waited twice, first for * segment in WAL archive and then for streamed segment */ - wait_wal_lsn(stream_xlog_path, current.start_lsn, true, current.tli, false, true, ERROR, true); + wait_WAL_streaming_starts(); } /* initialize backup's file list */ @@ -1241,17 +1241,8 @@ pg_is_superuser(PGconn *conn) * streamed in 'archive_dir' or 'pg_wal' directory. * * If flag 'is_start_lsn' is set then issue warning for first-time users. - * If flag 'in_prev_segment' is set, look for LSN in previous segment, - * with EndRecPtr >= Target LSN. It should be used only for solving - * invalid XRecOff problem. * If flag 'segment_only' is set, then, instead of waiting for LSN, wait for segment, * containing that LSN. - * If flags 'in_prev_segment' and 'segment_only' are both set, then wait for - * previous segment. - * - * Flag 'in_stream_dir' determine whether we looking for WAL in 'pg_wal' directory or - * in archive. Do note, that we cannot rely sorely on global variable 'stream_wal' (current.stream) because, - * for example, PAGE backup must(!) look for start_lsn in archive regardless of wal_mode. * * 'timeout_elevel' determine the elevel for timeout elog message. If elevel lighter than * ERROR is used, then return InvalidXLogRecPtr. TODO: return something more concrete, for example 1. 
@@ -1261,16 +1252,15 @@ pg_is_superuser(PGconn *conn) */ XLogRecPtr wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, - bool in_prev_segment, bool segment_only, - int timeout_elevel, bool in_stream_dir) + bool segment_only, int timeout_elevel) { XLogSegNo targetSegNo; char wal_segment_path[MAXPGPATH], wal_segment[MAXFNAMELEN]; + char* try_segment_path = NULL; bool file_exists = false; uint32 try_count = 0, timeout; - char *wal_delivery_str = in_stream_dir ? "streamed":"archived"; #ifdef HAVE_LIBZ char gz_wal_segment_path[MAXPGPATH]; @@ -1278,7 +1268,7 @@ wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_l /* Compute the name of the WAL file containing requested LSN */ GetXLogSegNo(target_lsn, targetSegNo, instance_config.xlog_seg_size); - if (in_prev_segment) + if (target_lsn % instance_config.xlog_seg_size == 0) targetSegNo--; GetXLogFileName(wal_segment, tli, targetSegNo, instance_config.xlog_seg_size); @@ -1304,50 +1294,6 @@ wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_l elog(LOG, "Looking for LSN %X/%X in segment: %s", (uint32) (target_lsn >> 32), (uint32) target_lsn, wal_segment); - if (in_stream_dir && !in_prev_segment) - { - /* separate simple loop for streaming */ - for (;;) - { - TimeLineID curtli; - XLogRecPtr curptr; - XLogRecPtr prevptr; - - getCurrentStreamPosition(&curtli, &curptr, &prevptr); - if (curtli > tli || (curtli == tli && curptr > target_lsn)) - return target_lsn; - - sleep(1); - if (interrupted || thread_interrupted) - elog(ERROR, "Interrupted during waiting for WAL streaming"); - try_count++; - - /* Inform user if WAL segment is absent in first attempt */ - if (try_count == 1) - { - if (segment_only) - elog(INFO, "Wait for WAL segment %s to be %s", - wal_segment_path, wal_delivery_str); - else - elog(INFO, "Wait for LSN %X/%X in %s WAL segment %s", - (uint32) (target_lsn >> 32), (uint32) target_lsn, - wal_delivery_str, 
wal_segment_path); - } - - if (current.from_replica && - (XRecOffIsNull(target_lsn) || try_count > timeout / 2)) - { - if (!XLogRecPtrIsInvalid(prevptr)) - { - /* LSN of the prior record was found */ - elog(LOG, "Abuse prior LSN from stream: %X/%X", - (uint32) (prevptr >> 32), (uint32) prevptr); - return prevptr; - } - } - } - } - #ifdef HAVE_LIBZ snprintf(gz_wal_segment_path, sizeof(gz_wal_segment_path), "%s.gz", wal_segment_path); @@ -1366,11 +1312,17 @@ wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_l #ifdef HAVE_LIBZ file_exists = fileExists(gz_wal_segment_path, FIO_BACKUP_HOST); if (file_exists) - elog(LOG, "Found compressed WAL segment: %s", wal_segment_path); + { + elog(LOG, "Found compressed WAL segment: %s", gz_wal_segment_path); + try_segment_path = gz_wal_segment_path; + } #endif } else + { elog(LOG, "Found WAL segment: %s", wal_segment_path); + try_segment_path = wal_segment_path; + } } if (file_exists) @@ -1379,18 +1331,6 @@ wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_l if (segment_only) return InvalidXLogRecPtr; - /* - * A WAL segment found. Look for target LSN in it. - */ - if (!XRecOffIsNull(target_lsn) && - wal_contains_lsn(wal_segment_dir, target_lsn, tli, - instance_config.xlog_seg_size)) - /* Target LSN was found */ - { - elog(LOG, "Found LSN: %X/%X", (uint32) (target_lsn >> 32), (uint32) target_lsn); - return target_lsn; - } - /* * If we failed to get target LSN in a reasonable time, try * to get LSN of last valid record prior to the target LSN. But only @@ -1405,39 +1345,42 @@ wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_l * 2. Replica returened endpoint LSN with NullXRecOff. We want to look * for previous record which endpoint points greater or equal LSN in previous WAL segment. 
*/ - if (current.from_replica && - (XRecOffIsNull(target_lsn) || try_count > timeout / 2)) - { - XLogRecPtr res; + XLogRecPtr prev; - res = get_prior_record_lsn(wal_segment_dir, current.start_lsn, target_lsn, tli, - in_prev_segment, instance_config.xlog_seg_size); + prev = get_prior_record_lsn(wal_segment_dir, current.start_lsn, target_lsn, tli, + instance_config.xlog_seg_size); - if (!XLogRecPtrIsInvalid(res)) - { - /* LSN of the prior record was found */ - elog(LOG, "Found prior LSN: %X/%X", - (uint32) (res >> 32), (uint32) res); - return res; - } + if (!XLogRecPtrIsInvalid(prev)) + { + /* LSN of the prior record was found */ + elog(LOG, "Found prior LSN: %X/%X", + (uint32) (prev >> 32), (uint32) prev); + return target_lsn; } + elog(ERROR, "Attempt %d: prior lsn is not found in %s", try_count+1, + try_segment_path); + } + else + { + elog(LOG, "Attempt %d: file %s is not found", try_count+1, + wal_segment_path); } sleep(1); if (interrupted || thread_interrupted) - elog(ERROR, "Interrupted during waiting for WAL %s", in_stream_dir ? 
"streaming" : "archiving"); + elog(ERROR, "Interrupted during waiting for WAL archiving"); try_count++; /* Inform user if WAL segment is absent in first attempt */ if (try_count == 1) { if (segment_only) - elog(INFO, "Wait for WAL segment %s to be %s", - wal_segment_path, wal_delivery_str); + elog(INFO, "Wait for WAL segment %s to be archived", + wal_segment_path); else - elog(INFO, "Wait for LSN %X/%X in %s WAL segment %s", + elog(INFO, "Wait for LSN %X/%X in archived WAL segment %s", (uint32) (target_lsn >> 32), (uint32) target_lsn, - wal_delivery_str, wal_segment_path); + wal_segment_path); } if (!current.stream && is_start_lsn && try_count == 30) @@ -1448,158 +1391,22 @@ wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_l if (timeout > 0 && try_count > timeout) { if (file_exists) - elog(timeout_elevel, "WAL segment %s was %s, " - "but target LSN %X/%X could not be %s in %d seconds", - wal_segment, wal_delivery_str, + elog(timeout_elevel, "WAL segment %s was archived, " + "but target LSN %X/%X could not be archived in %d seconds", + wal_segment, (uint32) (target_lsn >> 32), (uint32) target_lsn, - wal_delivery_str, timeout); + timeout); /* If WAL segment doesn't exist or we wait for previous segment */ else elog(timeout_elevel, - "WAL segment %s could not be %s in %d seconds", - wal_segment, wal_delivery_str, timeout); + "WAL segment %s could not be archived in %d seconds", + wal_segment, timeout); return InvalidXLogRecPtr; } } } -/* - * Check stop_lsn (returned from pg_stop_backup()) and update backup->stop_lsn - */ -void -wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBackup *backup) -{ - bool stop_lsn_exists = false; - - /* It is ok for replica to return invalid STOP LSN - * UPD: Apparently it is ok even for a master. 
- */ - if (!XRecOffIsValid(stop_lsn)) - { - XLogSegNo segno = 0; - XLogRecPtr lsn_tmp = InvalidXLogRecPtr; - - /* - * Even though the value is invalid, it's expected postgres behaviour - * and we're trying to fix it below. - */ - elog(LOG, "Invalid offset in stop_lsn value %X/%X, trying to fix", - (uint32) (stop_lsn >> 32), (uint32) (stop_lsn)); - - /* - * Note: even with gdb it is very hard to produce automated tests for - * contrecord + invalid LSN, so emulate it for manual testing. - */ - //lsn = lsn - XLOG_SEG_SIZE; - //elog(WARNING, "New Invalid stop_backup_lsn value %X/%X", - // (uint32) (stop_lsn >> 32), (uint32) (stop_lsn)); - - GetXLogSegNo(stop_lsn, segno, instance_config.xlog_seg_size); - - /* - * Note, that there is no guarantee that corresponding WAL file even exists. - * Replica may return LSN from future and keep staying in present. - * Or it can return invalid LSN. - * - * That's bad, since we want to get real LSN to save it in backup label file - * and to use it in WAL validation. - * - * So we try to do the following: - * 1. Wait 'archive_timeout' seconds for segment containing stop_lsn and - * look for the first valid record in it. - * It solves the problem of occasional invalid LSN on write-busy system. - * 2. Failing that, look for record in previous segment with endpoint - * equal or greater than stop_lsn. It may(!) solve the problem of invalid LSN - * on write-idle system. If that fails too, error out. 
- */ - - /* stop_lsn is pointing to a 0 byte of xlog segment */ - if (stop_lsn % instance_config.xlog_seg_size == 0) - { - /* Wait for segment with current stop_lsn, it is ok for it to never arrive */ - wait_wal_lsn(xlog_path, stop_lsn, false, backup->tli, - false, true, WARNING, backup->stream); - - /* Get the first record in segment with current stop_lsn */ - lsn_tmp = get_first_record_lsn(xlog_path, segno, backup->tli, - instance_config.xlog_seg_size, - instance_config.archive_timeout); - - /* Check that returned LSN is valid and greater than stop_lsn */ - if (XLogRecPtrIsInvalid(lsn_tmp) || - !XRecOffIsValid(lsn_tmp) || - lsn_tmp < stop_lsn) - { - /* Backup from master should error out here */ - if (!backup->from_replica) - elog(ERROR, "Failed to get next WAL record after %X/%X", - (uint32) (stop_lsn >> 32), - (uint32) (stop_lsn)); - - /* No luck, falling back to looking up for previous record */ - elog(WARNING, "Failed to get next WAL record after %X/%X, " - "looking for previous WAL record", - (uint32) (stop_lsn >> 32), - (uint32) (stop_lsn)); - - /* Despite looking for previous record there is not guarantee of success - * because previous record can be the contrecord. 
- */ - lsn_tmp = wait_wal_lsn(xlog_path, stop_lsn, false, backup->tli, - true, false, ERROR, backup->stream); - - /* sanity */ - if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp)) - elog(ERROR, "Failed to get WAL record prior to %X/%X", - (uint32) (stop_lsn >> 32), - (uint32) (stop_lsn)); - } - } - /* stop lsn is aligned to xlog block size, just find next lsn */ - else if (stop_lsn % XLOG_BLCKSZ == 0) - { - /* Wait for segment with current stop_lsn */ - wait_wal_lsn(xlog_path, stop_lsn, false, backup->tli, - false, true, ERROR, backup->stream); - - /* Get the next closest record in segment with current stop_lsn */ - lsn_tmp = get_next_record_lsn(xlog_path, segno, backup->tli, - instance_config.xlog_seg_size, - instance_config.archive_timeout, - stop_lsn); - - /* sanity */ - if (!XRecOffIsValid(lsn_tmp) || XLogRecPtrIsInvalid(lsn_tmp)) - elog(ERROR, "Failed to get WAL record next to %X/%X", - (uint32) (stop_lsn >> 32), - (uint32) (stop_lsn)); - } - /* PostgreSQL returned something very illegal as STOP_LSN, error out */ - else - elog(ERROR, "Invalid stop_backup_lsn value %X/%X", - (uint32) (stop_lsn >> 32), (uint32) (stop_lsn)); - - /* Setting stop_backup_lsn will set stop point for streaming */ - stop_backup_lsn = lsn_tmp; - stop_lsn_exists = true; - } - - elog(INFO, "stop_lsn: %X/%X", - (uint32) (stop_lsn >> 32), (uint32) (stop_lsn)); - - /* - * Wait for stop_lsn to be archived or streamed. - * If replica returned valid STOP_LSN of not actually existing record, - * look for previous record with endpoint >= STOP_LSN. 
- */ - if (!stop_lsn_exists) - stop_backup_lsn = wait_wal_lsn(xlog_path, stop_lsn, false, backup->tli, - false, false, ERROR, backup->stream); - - backup->stop_lsn = stop_backup_lsn; -} - /* Remove annoying NOTICE messages generated by backend */ void pg_silent_client_messages(PGconn *conn) @@ -1901,11 +1708,26 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb { join_path_components(stream_xlog_path, backup->database_dir, PG_XLOG_DIR); xlog_path = stream_xlog_path; + /* This function will also add list of xlog files + * to the passed filelist */ + if(wait_WAL_streaming_end(backup_files_list, xlog_path, + stop_backup_result.lsn, backup)) + elog(ERROR, "WAL streaming failed"); + elog(INFO, "backup->stop_lsn %X/%X", + (uint32_t)(backup->stop_lsn>>32), (uint32_t)backup->stop_lsn); + } else + { xlog_path = instanceState->instance_wal_subdir_path; - wait_wal_and_calculate_stop_lsn(xlog_path, stop_backup_result.lsn, backup); + backup->stop_lsn = wait_wal_lsn(xlog_path, stop_backup_result.lsn, false, backup->tli, + false, ERROR); + if (XLogRecPtrIsInvalid(backup->stop_lsn)) + elog(ERROR, "We couldn't wait for %llX", + (long long)stop_backup_result.lsn); + ft_assert(backup->stop_lsn == stop_backup_result.lsn); + } /* Write backup_label and tablespace_map */ Assert(stop_backup_result.backup_label_content.len != 0); @@ -1925,14 +1747,6 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb } ft_str_free(&stop_backup_result.tablespace_map_content); - if (backup->stream) - { - /* This function will also add list of xlog files - * to the passed filelist */ - if(wait_WAL_streaming_end(backup_files_list)) - elog(ERROR, "WAL streaming failed"); - } - backup->recovery_xid = stop_backup_result.snapshot_xid; elog(INFO, "Getting the Recovery Time from WAL"); diff --git a/src/catalog.c b/src/catalog.c index 22a84e00a..2f873c5ed 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -18,8 +18,8 @@ #include "utils/file.h" #include 
"utils/configuration.h" -static pgBackup* get_closest_backup(timelineInfo *tlinfo); -static pgBackup* get_oldest_backup(timelineInfo *tlinfo); +static pgBackup* get_closest_backup(timelineInfo *tlinfo, uint32_t xlog_seg_size); +static pgBackup* get_oldest_backup(timelineInfo *tlinfo, uint32_t xlog_seg_size); static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"}; static err_i create_backup_dir(pgBackup *backup, const char *backup_instance_path); @@ -1857,8 +1857,8 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) { timelineInfo *tlinfo = parray_get(timelineinfos, i); - tlinfo->oldest_backup = get_oldest_backup(tlinfo); - tlinfo->closest_backup = get_closest_backup(tlinfo); + tlinfo->oldest_backup = get_oldest_backup(tlinfo, instance->xlog_seg_size); + tlinfo->closest_backup = get_closest_backup(tlinfo, instance->xlog_seg_size); } /* determine which WAL segments must be kept because of wal retention */ @@ -2222,7 +2222,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) * timeline is unreachable. Return NULL. */ pgBackup* -get_closest_backup(timelineInfo *tlinfo) +get_closest_backup(timelineInfo *tlinfo, uint32 xlog_seg_size) { pgBackup *closest_backup = NULL; int i; @@ -2245,7 +2245,7 @@ get_closest_backup(timelineInfo *tlinfo) * should be considered. */ if (!XLogRecPtrIsInvalid(backup->stop_lsn) && - XRecOffIsValid(backup->stop_lsn) && + XRecEndLooksGood(backup->stop_lsn, xlog_seg_size) && backup->stop_lsn <= tlinfo->switchpoint && (backup->status == BACKUP_STATUS_OK || backup->status == BACKUP_STATUS_DONE)) @@ -2273,7 +2273,7 @@ get_closest_backup(timelineInfo *tlinfo) * there is no backups on this timeline. Return NULL. 
*/ pgBackup* -get_oldest_backup(timelineInfo *tlinfo) +get_oldest_backup(timelineInfo *tlinfo, uint32_t xlog_seg_size) { pgBackup *oldest_backup = NULL; int i; @@ -2287,7 +2287,7 @@ get_oldest_backup(timelineInfo *tlinfo) /* Backups with invalid START LSN can be safely skipped */ if (XLogRecPtrIsInvalid(backup->start_lsn) || - !XRecOffIsValid(backup->start_lsn)) + !XRecPtrLooksGood(backup->start_lsn, xlog_seg_size)) continue; /* diff --git a/src/catchup.c b/src/catchup.c index 848a290e9..9f690dbb2 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -1048,8 +1048,26 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pg_free(stop_backup_query_text); } + /* wait for end of wal streaming and calculate wal size transfered */ if (!dry_run) - wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); + { + parray *wal_files_list = NULL; + wal_files_list = parray_new(); + + if (wait_WAL_streaming_end(wal_files_list, dest_xlog_path, + stop_backup_result.lsn, ¤t)) + elog(ERROR, "WAL streaming failed"); + + for (i = 0; i < parray_num(wal_files_list); i++) + { + pgFile *file = (pgFile *) parray_get(wal_files_list, i); + transfered_walfiles_bytes += file->size; + } + + parray_walk(wal_files_list, pgFileFree); + parray_free(wal_files_list); + wal_files_list = NULL; + } /* Write backup_label */ Assert(stop_backup_result.backup_label_content.len != 0); @@ -1075,26 +1093,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, } ft_str_free(&stop_backup_result.tablespace_map_content); - /* wait for end of wal streaming and calculate wal size transfered */ - if (!dry_run) - { - parray *wal_files_list = NULL; - wal_files_list = parray_new(); - - if (wait_WAL_streaming_end(wal_files_list)) - elog(ERROR, "WAL streaming failed"); - - for (i = 0; i < parray_num(wal_files_list); i++) - { - pgFile *file = (pgFile *) parray_get(wal_files_list, i); - transfered_walfiles_bytes += file->size; - } - - 
parray_walk(wal_files_list, pgFileFree); - parray_free(wal_files_list); - wal_files_list = NULL; - } - /* * In case of backup from replica we must fix minRecPoint */ diff --git a/src/compatibility/receivelog.c b/src/compatibility/receivelog.c index 3d3d3c6c9..c5972ea1e 100644 --- a/src/compatibility/receivelog.c +++ b/src/compatibility/receivelog.c @@ -496,7 +496,6 @@ ReceiveXlogStream(PGconn *conn, StreamCtl *stream) */ lastFlushPosition = stream->startpos; stream->currentpos = 0; - stream->prevpos = 0; while (1) { @@ -781,12 +780,11 @@ HandleCopyStream(PGconn *conn, StreamCtl *stream, } else if (copybuf[0] == 'w') { - bool ok = ProcessXLogDataMsg(conn, stream, copybuf, r, &blockpos); - stream->prevpos = stream->currentpos; - stream->currentpos = blockpos; - if (!ok) + if (!ProcessXLogDataMsg(conn, stream, copybuf, r, &blockpos)) goto error; + stream->currentpos = blockpos; + /* * Check if we should continue streaming, or abort at this * point. diff --git a/src/compatibility/receivelog.h b/src/compatibility/receivelog.h index e4bec59e5..cd5a290c9 100644 --- a/src/compatibility/receivelog.h +++ b/src/compatibility/receivelog.h @@ -30,8 +30,7 @@ typedef struct StreamCtl { XLogRecPtr startpos; /* Start position for streaming */ volatile XLogRecPtr currentpos; /* current position */ - volatile XLogRecPtr prevpos; /* current position */ - volatile TimeLineID timeline; /* Timeline to stream data from */ + TimeLineID timeline; /* Timeline to stream data from */ char *sysidentifier; /* Validate this system identifier and * timeline */ int standby_message_timeout; /* Send status messages this often */ diff --git a/src/data.c b/src/data.c index 55e32f2ed..8a24ade40 100644 --- a/src/data.c +++ b/src/data.c @@ -1541,6 +1541,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, checksum_version ? 
"correct" : "not enabled", (uint32) (page_st.lsn >> 32), (uint32) page_st.lsn, (uint32) (stop_lsn >> 32), (uint32) stop_lsn); + is_valid = false; break; } } diff --git a/src/parsexlog.c b/src/parsexlog.c index 0f2e0eaf5..821ff1783 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -134,9 +134,6 @@ typedef struct */ bool got_target; - /* Should we read record, located at endpoint position */ - bool inclusive_endpoint; - /* * Return value from the thread. * 0 means there is no error, 1 - there is an error. @@ -167,8 +164,7 @@ static bool RunXLogThreads(const char *archivedir, XLogRecPtr startpoint, XLogRecPtr endpoint, bool consistent_read, xlog_record_function process_record, - XLogRecTarget *last_rec, - bool inclusive_endpoint); + XLogRecTarget *last_rec); //static XLogReaderState *InitXLogThreadRead(xlog_thread_arg *arg); static bool SwitchThreadToNextWal(XLogReaderState *xlogreader, xlog_thread_arg *arg); @@ -250,7 +246,7 @@ extractPageMap(const char *archivedir, uint32 wal_seg_size, extract_isok = RunXLogThreads(archivedir, 0, InvalidTransactionId, InvalidXLogRecPtr, end_tli, wal_seg_size, startpoint, endpoint, false, extractPageInfo, - NULL, true); + NULL); else { /* We have to process WAL located on several different xlog intervals, @@ -329,22 +325,12 @@ extractPageMap(const char *archivedir, uint32 wal_seg_size, for (i = parray_num(interval_list) - 1; i >= 0; i--) { - bool inclusive_endpoint; lsnInterval *tmp_interval = (lsnInterval *) parray_get(interval_list, i); - /* In case of replica promotion, endpoints of intermediate - * timelines can be unreachable. - */ - inclusive_endpoint = false; - - /* ... 
but not the end timeline */ - if (tmp_interval->tli == end_tli) - inclusive_endpoint = true; - extract_isok = RunXLogThreads(archivedir, 0, InvalidTransactionId, InvalidXLogRecPtr, tmp_interval->tli, wal_seg_size, tmp_interval->begin_lsn, tmp_interval->end_lsn, - false, extractPageInfo, NULL, inclusive_endpoint); + false, extractPageInfo, NULL); if (!extract_isok) break; @@ -373,7 +359,7 @@ validate_backup_wal_from_start_to_stop(pgBackup *backup, got_endpoint = RunXLogThreads(archivedir, 0, InvalidTransactionId, InvalidXLogRecPtr, tli, xlog_seg_size, backup->start_lsn, backup->stop_lsn, - false, NULL, NULL, true); + false, NULL, NULL); if (!got_endpoint) { @@ -414,7 +400,7 @@ validate_wal(pgBackup *backup, const char *archivedir, (uint32) (backup->start_lsn >> 32), (uint32) (backup->start_lsn), backup_id_of(backup)); - if (!XRecOffIsValid(backup->stop_lsn)) + if (!XRecEndLooksGood(backup->stop_lsn, wal_seg_size)) elog(ERROR, "Invalid stop_lsn value %X/%X of backup %s", (uint32) (backup->stop_lsn >> 32), (uint32) (backup->stop_lsn), backup_id_of(backup)); @@ -448,7 +434,7 @@ validate_wal(pgBackup *backup, const char *archivedir, * recovery target time or xid. 
*/ if (!TransactionIdIsValid(target_xid) && target_time == 0 && - !XRecOffIsValid(target_lsn)) + XLogRecPtrIsInvalid(target_lsn)) { /* Recovery target is not given so exit */ elog(INFO, "Backup %s WAL segments are valid", backup_id_of(backup)); @@ -479,13 +465,13 @@ validate_wal(pgBackup *backup, const char *archivedir, if ((TransactionIdIsValid(target_xid) && target_xid == last_rec.rec_xid) || (target_time != 0 && backup->recovery_time >= target_time) - || (XRecOffIsValid(target_lsn) && last_rec.rec_lsn >= target_lsn)) + || (!XLogRecPtrIsInvalid(target_lsn) && last_rec.rec_lsn >= target_lsn)) all_wal = true; all_wal = all_wal || RunXLogThreads(archivedir, target_time, target_xid, target_lsn, tli, wal_seg_size, backup->stop_lsn, - InvalidXLogRecPtr, true, validateXLogRecord, &last_rec, true); + InvalidXLogRecPtr, true, validateXLogRecord, &last_rec); if (last_rec.rec_time > 0) time2iso(last_timestamp, lengthof(last_timestamp), timestamptz_to_time_t(last_rec.rec_time), false); @@ -513,7 +499,7 @@ validate_wal(pgBackup *backup, const char *archivedir, else if (target_time != 0) elog(ERROR, "Not enough WAL records to time %s", target_timestamp); - else if (XRecOffIsValid(target_lsn)) + else if (!XLogRecPtrIsInvalid(target_lsn)) elog(ERROR, "Not enough WAL records to lsn %X/%X", (uint32) (target_lsn >> 32), (uint32) (target_lsn)); } @@ -540,23 +526,43 @@ read_recovery_info(const char *archivedir, TimeLineID tli, uint32 wal_seg_size, elog(ERROR, "Invalid start_lsn value %X/%X", (uint32) (start_lsn >> 32), (uint32) (start_lsn)); + if (!XRecEndLooksGood(stop_lsn, wal_seg_size)) + elog(ERROR, "Invalid stop_lsn value %X/%X", + (uint32) (stop_lsn >> 32), (uint32) (stop_lsn)); + xlogreader = InitXLogPageRead(&reader_data, archivedir, tli, wal_seg_size, false, true, true); /* Read records from stop_lsn down to start_lsn */ do { + XLogRecPtr trypoint; XLogRecPtr curpoint; XLogRecPtr prevpoint = 0; XLogRecord *record; TimestampTz last_time = 0; char *errormsg; - curpoint = 
startpoint; - if (curpoint < start_lsn) - curpoint = start_lsn; + trypoint = startpoint; + if (trypoint < start_lsn) + trypoint = start_lsn; - curpoint = XLogFindNextRecord(xlogreader, curpoint); + curpoint = XLogFindNextRecord(xlogreader, trypoint); + + if (XLogRecPtrIsInvalid(curpoint)) + { + if (trypoint == start_lsn) + { + elog(ERROR, "There is no valid log between %X/%X and %X/%X", + (uint32_t)(start_lsn>>32), (uint32_t)start_lsn, + (uint32_t)(stop_lsn>>32), (uint32_t)stop_lsn); + break; + } + endpoint = startpoint; + startpoint--; + startpoint = startpoint - (startpoint % STEPBACK_CHUNK); + continue; + } do { record = WalReadRecord(xlogreader, curpoint, &errormsg); @@ -595,8 +601,6 @@ read_recovery_info(const char *archivedir, TimeLineID tli, uint32 wal_seg_size, /* Goto previous megabyte */ endpoint = startpoint; startpoint = prevpoint - (prevpoint % STEPBACK_CHUNK); - if (startpoint < start_lsn) - startpoint = start_lsn; } while (endpoint > start_lsn); /* Didn't find timestamp from WAL records between start_lsn and stop_lsn */ @@ -609,205 +613,6 @@ read_recovery_info(const char *archivedir, TimeLineID tli, uint32 wal_seg_size, return res; } -/* - * Check if there is a WAL segment file in 'archivedir' which contains - * 'target_lsn'. 
- */ -bool -wal_contains_lsn(const char *archivedir, XLogRecPtr target_lsn, - TimeLineID target_tli, uint32 wal_seg_size) -{ - XLogReaderState *xlogreader; - XLogReaderData reader_data; - char *errormsg; - bool res; - - if (!XRecOffIsValid(target_lsn)) - elog(ERROR, "Invalid target_lsn value %X/%X", - (uint32) (target_lsn >> 32), (uint32) (target_lsn)); - - xlogreader = InitXLogPageRead(&reader_data, archivedir, target_tli, - wal_seg_size, false, false, true); - - if (xlogreader == NULL) - elog(ERROR, "Out of memory"); - - xlogreader->system_identifier = instance_config.system_identifier; - -#if PG_VERSION_NUM >= 130000 - if (XLogRecPtrIsInvalid(target_lsn)) - target_lsn = SizeOfXLogShortPHD; - XLogBeginRead(xlogreader, target_lsn); -#endif - - res = WalReadRecord(xlogreader, target_lsn, &errormsg) != NULL; - /* Didn't find 'target_lsn' and there is no error, return false */ - - if (errormsg) - elog(WARNING, "Could not read WAL record at %X/%X: %s", - (uint32) (target_lsn >> 32), (uint32) (target_lsn), errormsg); - - CleanupXLogPageRead(xlogreader); - XLogReaderFree(xlogreader); - - return res; -} - -/* - * Get LSN of a first record within the WAL segment with number 'segno'. 
- */ -XLogRecPtr -get_first_record_lsn(const char *archivedir, XLogSegNo segno, - TimeLineID tli, uint32 wal_seg_size, int timeout) -{ - XLogReaderState *xlogreader; - XLogReaderData reader_data; - XLogRecPtr record = InvalidXLogRecPtr; - XLogRecPtr startpoint; - char wal_segment[MAXFNAMELEN]; - int attempts = 0; - - if (segno <= 1) - elog(ERROR, "Invalid WAL segment number " UINT64_FORMAT, segno); - - GetXLogFileName(wal_segment, tli, segno, instance_config.xlog_seg_size); - - xlogreader = InitXLogPageRead(&reader_data, archivedir, tli, wal_seg_size, - false, false, true); - if (xlogreader == NULL) - elog(ERROR, "Out of memory"); - xlogreader->system_identifier = instance_config.system_identifier; - - /* Set startpoint to 0 in segno */ - GetXLogRecPtr(segno, 0, wal_seg_size, startpoint); - -#if PG_VERSION_NUM >= 130000 - if (XLogRecPtrIsInvalid(startpoint)) - startpoint = SizeOfXLogShortPHD; - XLogBeginRead(xlogreader, startpoint); -#endif - - while (attempts <= timeout) - { - record = XLogFindNextRecord(xlogreader, startpoint); - - if (XLogRecPtrIsInvalid(record)) - record = InvalidXLogRecPtr; - else - { - elog(LOG, "First record in WAL segment \"%s\": %X/%X", wal_segment, - (uint32) (record >> 32), (uint32) (record)); - break; - } - - attempts++; - sleep(1); - } - - /* cleanup */ - CleanupXLogPageRead(xlogreader); - XLogReaderFree(xlogreader); - - return record; -} - - -/* - * Get LSN of the record next after target lsn. 
- */ -XLogRecPtr -get_next_record_lsn(const char *archivedir, XLogSegNo segno, - TimeLineID tli, uint32 wal_seg_size, int timeout, - XLogRecPtr target) -{ - XLogReaderState *xlogreader; - XLogReaderData reader_data; - XLogRecPtr startpoint, found; - XLogRecPtr res = InvalidXLogRecPtr; - char wal_segment[MAXFNAMELEN]; - int attempts = 0; - - if (segno <= 1) - elog(ERROR, "Invalid WAL segment number " UINT64_FORMAT, segno); - - GetXLogFileName(wal_segment, tli, segno, instance_config.xlog_seg_size); - - xlogreader = InitXLogPageRead(&reader_data, archivedir, tli, wal_seg_size, - false, false, true); - if (xlogreader == NULL) - elog(ERROR, "Out of memory"); - xlogreader->system_identifier = instance_config.system_identifier; - - /* Set startpoint to 0 in segno */ - GetXLogRecPtr(segno, 0, wal_seg_size, startpoint); - -#if PG_VERSION_NUM >= 130000 - if (XLogRecPtrIsInvalid(startpoint)) - startpoint = SizeOfXLogShortPHD; - XLogBeginRead(xlogreader, startpoint); -#endif - - found = XLogFindNextRecord(xlogreader, startpoint); - - if (XLogRecPtrIsInvalid(found)) - { - if (xlogreader->errormsg_buf[0] != '\0') - elog(WARNING, "Could not read WAL record at %X/%X: %s", - (uint32) (startpoint >> 32), (uint32) (startpoint), - xlogreader->errormsg_buf); - else - elog(WARNING, "Could not read WAL record at %X/%X", - (uint32) (startpoint >> 32), (uint32) (startpoint)); - PrintXLogCorruptionMsg(&reader_data, ERROR); - } - startpoint = found; - - while (attempts <= timeout) - { - XLogRecord *record; - char *errormsg; - - if (interrupted) - elog(ERROR, "Interrupted during WAL reading"); - - record = WalReadRecord(xlogreader, startpoint, &errormsg); - - if (record == NULL) - { - XLogRecPtr errptr; - - errptr = XLogRecPtrIsInvalid(startpoint) ? 
xlogreader->EndRecPtr : - startpoint; - - if (errormsg) - elog(WARNING, "Could not read WAL record at %X/%X: %s", - (uint32) (errptr >> 32), (uint32) (errptr), - errormsg); - else - elog(WARNING, "Could not read WAL record at %X/%X", - (uint32) (errptr >> 32), (uint32) (errptr)); - PrintXLogCorruptionMsg(&reader_data, ERROR); - } - - if (xlogreader->ReadRecPtr >= target) - { - elog(LOG, "Record %X/%X is next after target LSN %X/%X", - (uint32) (xlogreader->ReadRecPtr >> 32), (uint32) (xlogreader->ReadRecPtr), - (uint32) (target >> 32), (uint32) (target)); - res = xlogreader->ReadRecPtr; - break; - } - else - startpoint = InvalidXLogRecPtr; - } - - /* cleanup */ - CleanupXLogPageRead(xlogreader); - XLogReaderFree(xlogreader); - - return res; -} - /* * Get LSN of a record prior to target_lsn. @@ -822,8 +627,7 @@ get_next_record_lsn(const char *archivedir, XLogSegNo segno, */ XLogRecPtr get_prior_record_lsn(const char *archivedir, XLogRecPtr start_lsn, - XLogRecPtr stop_lsn, TimeLineID tli, bool seek_prev_segment, - uint32 wal_seg_size) + XLogRecPtr stop_lsn, TimeLineID tli, uint32 wal_seg_size) { XLogReaderState *xlogreader; XLogReaderData reader_data; @@ -837,7 +641,7 @@ get_prior_record_lsn(const char *archivedir, XLogRecPtr start_lsn, if (segno <= 1) elog(ERROR, "Invalid WAL segment number " UINT64_FORMAT, segno); - if (seek_prev_segment) + if (stop_lsn % wal_seg_size == 0) segno = segno - 1; xlogreader = InitXLogPageRead(&reader_data, archivedir, tli, wal_seg_size, @@ -852,26 +656,15 @@ get_prior_record_lsn(const char *archivedir, XLogRecPtr start_lsn, * Calculate startpoint. Decide: we should use 'start_lsn' or offset 0. 
*/ GetXLogSegNo(start_lsn, start_segno, wal_seg_size); - if (start_segno == segno) - { - startpoint = start_lsn; -#if PG_VERSION_NUM >= 130000 - if (XLogRecPtrIsInvalid(startpoint)) - startpoint = SizeOfXLogShortPHD; - XLogBeginRead(xlogreader, startpoint); -#endif - } - else + + for (;;) { XLogRecPtr found; - GetXLogRecPtr(segno, 0, wal_seg_size, startpoint); - -#if PG_VERSION_NUM >= 130000 - if (XLogRecPtrIsInvalid(startpoint)) - startpoint = SizeOfXLogShortPHD; - XLogBeginRead(xlogreader, startpoint); -#endif + if (start_segno == segno) + startpoint = start_lsn; + else + GetXLogRecPtr(segno, 0, wal_seg_size, startpoint); found = XLogFindNextRecord(xlogreader, startpoint); @@ -884,9 +677,28 @@ get_prior_record_lsn(const char *archivedir, XLogRecPtr start_lsn, else elog(WARNING, "Could not read WAL record at %X/%X", (uint32) (startpoint >> 32), (uint32) (startpoint)); + if (segno > start_segno) + { + segno--; + continue; + } PrintXLogCorruptionMsg(&reader_data, ERROR); } startpoint = found; + break; + } + + /* This check doesn't make much value. But let it be. */ + if (start_segno == segno && + XRecPtrLooksGood(start_lsn, wal_seg_size) && + startpoint != start_lsn) + { + elog(ERROR, "Start lsn %X/%X wasn't found despite it looks good." + "Got lsn %X/%X instead. 
" + "(in archive dir %s)", + (uint32_t)(start_lsn>>32), (uint32_t)start_lsn, + (uint32_t)(startpoint>>32), (uint32_t)startpoint, + archivedir); } while (true) @@ -1176,7 +988,7 @@ RunXLogThreads(const char *archivedir, time_t target_time, TransactionId target_xid, XLogRecPtr target_lsn, TimeLineID tli, uint32 segment_size, XLogRecPtr startpoint, XLogRecPtr endpoint, bool consistent_read, xlog_record_function process_record, - XLogRecTarget *last_rec, bool inclusive_endpoint) + XLogRecTarget *last_rec) { pthread_t *threads; xlog_thread_arg *thread_args; @@ -1185,7 +997,8 @@ RunXLogThreads(const char *archivedir, time_t target_time, XLogSegNo endSegNo = 0; bool result = true; - if (!XRecOffIsValid(startpoint) && !XRecOffIsNull(startpoint)) + /* we actually can start from record end */ + if (!XRecEndLooksGood(startpoint, segment_size)) elog(ERROR, "Invalid startpoint value %X/%X", (uint32) (startpoint >> 32), (uint32) (startpoint)); @@ -1197,19 +1010,12 @@ RunXLogThreads(const char *archivedir, time_t target_time, if (!XLogRecPtrIsInvalid(endpoint)) { -// if (XRecOffIsNull(endpoint) && !inclusive_endpoint) - if (XRecOffIsNull(endpoint)) - { - GetXLogSegNo(endpoint, endSegNo, segment_size); + GetXLogSegNo(endpoint, endSegNo, segment_size); + if (endpoint % segment_size == 0) endSegNo--; - } - else if (!XRecOffIsValid(endpoint)) - { + else if (!XRecEndLooksGood(endpoint, segment_size)) elog(ERROR, "Invalid endpoint value %X/%X", (uint32) (endpoint >> 32), (uint32) (endpoint)); - } - else - GetXLogSegNo(endpoint, endSegNo, segment_size); } /* Initialize static variables for workers */ @@ -1244,7 +1050,6 @@ RunXLogThreads(const char *archivedir, time_t target_time, arg->startpoint = startpoint; arg->endpoint = endpoint; arg->endSegNo = endSegNo; - arg->inclusive_endpoint = inclusive_endpoint; arg->got_target = false; /* By default there is some error */ arg->ret = 1; @@ -1356,12 +1161,6 @@ XLogThreadWorker(void *arg) elog(ERROR, "Thread [%d]: out of memory", 
reader_data->thread_num); xlogreader->system_identifier = instance_config.system_identifier; -#if PG_VERSION_NUM >= 130000 - if (XLogRecPtrIsInvalid(thread_arg->startpoint)) - thread_arg->startpoint = SizeOfXLogShortPHD; - XLogBeginRead(xlogreader, thread_arg->startpoint); -#endif - found = XLogFindNextRecord(xlogreader, thread_arg->startpoint); /* @@ -1479,10 +1278,9 @@ XLogThreadWorker(void *arg) (uint32) (errptr >> 32), (uint32) (errptr)); /* In we failed to read record located at endpoint position, - * and endpoint is not inclusive, do not consider this as an error. + * do not consider this as an error. */ - if (!thread_arg->inclusive_endpoint && - errptr == thread_arg->endpoint) + if (errptr == thread_arg->endpoint) { elog(LOG, "Thread [%d]: Endpoint %X/%X is not inclusive, switch to the next timeline", reader_data->thread_num, @@ -1852,7 +1650,7 @@ validateXLogRecord(XLogReaderState *record, XLogReaderData *reader_data, timestamptz_to_time_t(reader_data->cur_rec.rec_time) >= wal_target_time) *stop_reading = true; /* Check target lsn */ - else if (XRecOffIsValid(wal_target_lsn) && + else if (!XLogRecPtrIsInvalid(wal_target_lsn) && reader_data->cur_rec.rec_lsn >= wal_target_lsn) *stop_reading = true; } @@ -1911,7 +1709,7 @@ bool validate_wal_segment(TimeLineID tli, XLogSegNo segno, const char *prefetch_ rc = RunXLogThreads(prefetch_dir, 0, InvalidTransactionId, InvalidXLogRecPtr, tli, wal_seg_size, - startpoint, endpoint, false, NULL, NULL, true); + startpoint, endpoint, false, NULL, NULL); num_threads = tmp_num_threads; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index e1137746e..24dd660a7 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -125,6 +125,32 @@ extern const char *PROGRAM_EMAIL; #define XRecOffIsNull(xlrp) \ ((xlrp) % XLOG_BLCKSZ == 0) +ft_inline bool +XRecPtrLooksGood(XLogRecPtr xlrp, uint64_t segsize) +{ + uint64_t off = xlrp % segsize; + + /* + * record start is good if + * - if it points after page header + * -- but remember: 
first segments' page's header is longer + */ + return (off >= SizeOfXLogLongPHD && XRecOffIsValid(off)); +} + +ft_inline bool +XRecEndLooksGood(XLogRecPtr xlrp, uint64_t segsize) +{ + uint64_t off = xlrp % segsize; + + /* + * record end is good if + * - it points to valid record start + * - or it points to block/segment start (actually, end of previous) + */ + return XRecOffIsNull(off) || XRecPtrLooksGood(xlrp, segsize); +} + /* log(2**64) / log(36) = 12.38 => max 13 char + '\0' */ #define base36bufsize 14 @@ -1086,16 +1112,8 @@ extern bool read_recovery_info(const char *archivedir, TimeLineID tli, uint32 seg_size, XLogRecPtr start_lsn, XLogRecPtr stop_lsn, time_t *recovery_time); -extern bool wal_contains_lsn(const char *archivedir, XLogRecPtr target_lsn, - TimeLineID target_tli, uint32 seg_size); extern XLogRecPtr get_prior_record_lsn(const char *archivedir, XLogRecPtr start_lsn, - XLogRecPtr stop_lsn, TimeLineID tli, - bool seek_prev_segment, uint32 seg_size); - -extern XLogRecPtr get_first_record_lsn(const char *archivedir, XLogRecPtr start_lsn, - TimeLineID tli, uint32 wal_seg_size, int timeout); -extern XLogRecPtr get_next_record_lsn(const char *archivedir, XLogSegNo segno, TimeLineID tli, - uint32 wal_seg_size, int timeout, XLogRecPtr target); + XLogRecPtr stop_lsn, TimeLineID tli, uint32 seg_size); /* in util.c */ extern TimeLineID get_current_timeline(PGconn *conn); @@ -1181,7 +1199,9 @@ extern void start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOptions *conn_opt, XLogRecPtr startpos, TimeLineID starttli, bool is_backup); -extern int wait_WAL_streaming_end(parray *backup_files_list); +extern void wait_WAL_streaming_starts(void); +extern int wait_WAL_streaming_end(parray *backup_files_list, const char* xlog_path, + XLogRecPtr stop_lsn, pgBackup* backup); extern parray* parse_tli_history_buffer(char *history, TimeLineID tli); /* external variables and functions, implemented in backup.c */ @@ -1216,13 +1236,9 @@ extern void 
pg_stop_backup_consume(PGconn *conn, int server_version, extern void pg_stop_backup_write_file_helper(pioDrive_i drive, const char *path, const char *filename, const char *error_msg_filename, ft_str_t data, parray *file_list); extern XLogRecPtr wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr lsn, bool is_start_lsn, TimeLineID tli, - bool in_prev_segment, bool segment_only, - int timeout_elevel, bool in_stream_dir); -extern void wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBackup *backup); + bool segment_only, int timeout_elevel); extern int64 calculate_datasize_of_filelist(parray *filelist); -extern void getCurrentStreamPosition(TimeLineID *timeline, XLogRecPtr *ptr, XLogRecPtr *prev); - /* * Slices and arrays for C strings */ diff --git a/src/stream.c b/src/stream.c index 157da740b..f00c28bbb 100644 --- a/src/stream.c +++ b/src/stream.c @@ -610,20 +610,88 @@ start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOption pthread_create(&stream_thread, NULL, StreamLog, &stream_thread_arg); } +void +wait_WAL_streaming_starts(void) +{ + int timeout; + int try_count; + + if (instance_config.archive_timeout > 0) + timeout = instance_config.archive_timeout; + else + timeout = ARCHIVE_TIMEOUT_DEFAULT; + + for (try_count = 0; try_count < timeout; try_count++) + { + if (stream_ctl.currentpos) + break; + + /* Inform user if WAL streaming didn't start at first attempt */ + if (try_count == 1) + elog(INFO, "Wait for WAL streaming to start"); + + if (interrupted || thread_interrupted) + elog(ERROR, "Interrupted during waiting for WAL streaming"); + + sleep(1); + } +} + /* * Wait for the completion of stream * append list of streamed xlog files * into backup_files_list (if it is not NULL) */ int -wait_WAL_streaming_end(parray *backup_files_list) +wait_WAL_streaming_end(parray *backup_files_list, const char* xlog_path, + XLogRecPtr stop_lsn, pgBackup* backup) { + XLogRecPtr prev; + + stop_backup_lsn = stop_lsn; + 
pthread_join(stream_thread, NULL); if(backup_files_list != NULL) parray_concat(backup_files_list, xlog_files_list); parray_free(xlog_files_list); - return stream_thread_arg.ret; + if (!stream_thread_arg.ret) + { + elog(INFO, "stop_stream_lsn %X/%X currentpos %X/%X", + (uint32_t)(stop_stream_lsn>>32), (uint32_t)stop_stream_lsn, + (uint32_t)(stream_ctl.currentpos>>32), (uint32_t)stream_ctl.currentpos + ); + } + + if (stream_thread_arg.ret) + return stream_thread_arg.ret; + + /* + * Actually we don't need to check for stop_lsn since we already + * know streamer stopped after stop_lsn. + * Just do it for sanity. + */ + prev = get_prior_record_lsn(xlog_path, + backup->start_lsn, + stop_lsn, backup->tli, + instance_config.xlog_seg_size); + + if (!XLogRecPtrIsInvalid(prev)) + { + /* LSN of the prior record was found */ + elog(LOG, "Found prior LSN: %X/%X", + (uint32) (prev >> 32), (uint32) prev); + /* so write stop_lsn */ + backup->stop_lsn = stop_lsn; + } + + if (XLogRecPtrIsInvalid(backup->stop_lsn)) + { + elog(ERROR, "Couldn't find stop_lsn for stop_stream_lsn %X/%X", + (uint32) (stop_stream_lsn>>32), (uint32) stop_stream_lsn); + } + + return stream_thread_arg.ret; } /* Append streamed WAL segment to filelist */ @@ -701,12 +769,4 @@ add_history_file_to_filelist(parray *filelist, uint32 timeline, char *basedir) file = pgFileNew(fullpath, relpath, false, do_crc, drive); parray_append(filelist, file); -} - -void -getCurrentStreamPosition(TimeLineID *timeline, XLogRecPtr *ptr, XLogRecPtr *prev) -{ - *ptr = stream_ctl.currentpos; - *prev = stream_ctl.prevpos; - *timeline = stream_ctl.timeline; -} +} \ No newline at end of file From bc0ea49df669da8c2854d46b002501aa72ed1dbb Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 25 Dec 2022 21:31:10 +0300 Subject: [PATCH 318/339] [PBCKP-434] stabilize backup_test there was some nose-to-nose timing, so it could sporadically fail. And in fact timeouts need no to be so large. Reduce them a bit. 
--- tests/backup_test.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/backup_test.py b/tests/backup_test.py index bbad8a591..59b16c7ac 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -1753,7 +1753,7 @@ def test_backup_with_least_privileges_role(self): set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums'], - pg_options={'archive_timeout': '30s'}) + pg_options={'archive_timeout': '10s'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2080,14 +2080,14 @@ def test_backup_with_less_privileges_role(self): ptrack_enable=self.ptrack, initdb_params=['--data-checksums'], pg_options={ - 'archive_timeout': '30s', + 'archive_timeout': '10s', 'archive_mode': 'always', - 'checkpoint_timeout': '60s', + 'checkpoint_timeout': '30s', 'wal_level': 'logical'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) - self.set_config(backup_dir, 'node', options=['--archive-timeout=60s']) + self.set_config(backup_dir, 'node', options=['--archive-timeout=30s']) self.set_archiving(backup_dir, 'node', node) node.slow_start() @@ -2186,7 +2186,7 @@ def test_backup_with_less_privileges_role(self): self.add_instance(backup_dir, 'replica', replica) self.set_config( backup_dir, 'replica', - options=['--archive-timeout=120s', '--log-level-console=LOG']) + options=['--archive-timeout=60s', '--log-level-console=LOG']) self.set_archiving(backup_dir, 'replica', replica, replica=True) self.set_auto_conf(replica, {'hot_standby': 'on'}) @@ -2218,7 +2218,7 @@ def test_backup_with_less_privileges_role(self): self.switch_wal_segment(node) self.backup_node( backup_dir, 'replica', replica, backup_type='page', - datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s']) + datname='backupdb', options=['-U', 'backup', '--archive-timeout=20s']) self.backup_node( backup_dir, 'replica', replica, backup_type='page', From bed69c3ba435f8bc2a1760105286ad91e1c076e0 Mon Sep 17 00:00:00 
2001 From: Yura Sokolov Date: Sun, 25 Dec 2022 21:33:43 +0300 Subject: [PATCH 319/339] [PBCKP-434] fix replica_test.py There are a lot less messages to check for. And in fact backup in test_archive_replica_not_null_offset starts to succeed. --- tests/replica_test.py | 76 +++++++++---------------------------------- 1 file changed, 16 insertions(+), 60 deletions(-) diff --git a/tests/replica_test.py b/tests/replica_test.py index 9d48a5a04..297ebc97d 100644 --- a/tests/replica_test.py +++ b/tests/replica_test.py @@ -610,25 +610,9 @@ def test_replica_stop_lsn_null_offset(self): '--stream'], return_id=False) - self.assertIn( - 'LOG: Invalid offset in stop_lsn value 0/4000000', - output) - - self.assertIn( - 'WARNING: WAL segment 000000010000000000000004 could not be streamed in 30 seconds', - output) - - self.assertIn( - 'WARNING: Failed to get next WAL record after 0/4000000, looking for previous WAL record', - output) - - self.assertIn( - 'LOG: Looking for LSN 0/4000000 in segment: 000000010000000000000003', - output) - self.assertIn( 'has endpoint 0/4000000 which is ' - 'equal or greater than requested LSN 0/4000000', + 'equal or greater than requested LSN', output) self.assertIn( @@ -715,19 +699,16 @@ def test_replica_stop_lsn_null_offset_next_record(self): log_content = f.read() self.assertIn( - 'LOG: Invalid offset in stop_lsn value 0/4000000', - log_content) - - self.assertIn( - 'LOG: Looking for segment: 000000010000000000000004', + 'has endpoint 0/4000000 which is ' + 'equal or greater than requested LSN', log_content) self.assertIn( - 'LOG: First record in WAL segment "000000010000000000000004": 0/4000028', + 'LOG: Found prior LSN:', log_content) self.assertIn( - 'INFO: stop_lsn: 0/4000000', + 'INFO: backup->stop_lsn 0/4000000', log_content) self.assertTrue(self.show_pb(backup_dir, 'replica')[0]['status'] == 'DONE') @@ -782,18 +763,6 @@ def test_archive_replica_null_offset(self): '--no-validate'], return_id=False) - self.assertIn( - 'LOG: Invalid offset in 
stop_lsn value 0/4000000', - output) - - self.assertIn( - 'WARNING: WAL segment 000000010000000000000004 could not be archived in 30 seconds', - output) - - self.assertIn( - 'WARNING: Failed to get next WAL record after 0/4000000, looking for previous WAL record', - output) - self.assertIn( 'LOG: Looking for LSN 0/4000000 in segment: 000000010000000000000003', output) @@ -807,8 +776,6 @@ def test_archive_replica_null_offset(self): 'LOG: Found prior LSN:', output) - print(output) - # @unittest.skip("skip") def test_archive_replica_not_null_offset(self): """ @@ -849,29 +816,22 @@ def test_archive_replica_not_null_offset(self): '--no-validate'], return_id=False) - with self.assertRaises(ProbackupException) as ctx: - self.backup_node( - backup_dir, 'node', replica, replica.data_dir, - options=[ - '--archive-timeout=10', - '--log-level-console=LOG', - '--no-validate']) - e = ctx.exception - # vanilla -- 0/4000060 - # pgproee -- 0/4000078 - self.assertRegex( - e.message, - r'LOG: Looking for LSN (0/4000060|0/4000078|0/4000070) in segment: 000000010000000000000004', - "\n CMD: {0}".format(self.cmd)) + output = self.backup_node( + backup_dir, 'node', replica, replica.data_dir, + options=[ + '--archive-timeout=10', + '--log-level-console=LOG', + '--no-validate'], + return_id=False) self.assertRegex( - e.message, - r'INFO: Wait for LSN (0/4000060|0/4000078|0/4000070) in archived WAL segment', + output, + r'LOG: Record \S+ has endpoint 0/4000000 which is equal.*0/4000000', "\n CMD: {0}".format(self.cmd)) self.assertRegex( - e.message, - r'ERROR: WAL segment 000000010000000000000004 could not be archived in \d+ seconds', + output, + r'INFO: Backup \w+ completed\s*\Z', "\n CMD: {0}".format(self.cmd)) # @unittest.skip("skip") @@ -942,10 +902,6 @@ def test_replica_toast(self): pgdata = self.pgdata_content(replica.data_dir) - self.assertIn( - 'WARNING: Could not read WAL record at', - output) - self.assertIn( 'LOG: Found prior LSN:', output) From 
ecf0a7de4069c35639be5037ee4d88cbba94f120 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sun, 25 Dec 2022 22:10:57 +0300 Subject: [PATCH 320/339] [PBCKP-434] prefer to RunXLogThreads with consistent_read=true I doubt 'false' is ever usable. --- src/parsexlog.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/parsexlog.c b/src/parsexlog.c index 821ff1783..daf827dc9 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -245,7 +245,7 @@ extractPageMap(const char *archivedir, uint32 wal_seg_size, /* easy case */ extract_isok = RunXLogThreads(archivedir, 0, InvalidTransactionId, InvalidXLogRecPtr, end_tli, wal_seg_size, - startpoint, endpoint, false, extractPageInfo, + startpoint, endpoint, true, extractPageInfo, NULL); else { @@ -330,7 +330,7 @@ extractPageMap(const char *archivedir, uint32 wal_seg_size, extract_isok = RunXLogThreads(archivedir, 0, InvalidTransactionId, InvalidXLogRecPtr, tmp_interval->tli, wal_seg_size, tmp_interval->begin_lsn, tmp_interval->end_lsn, - false, extractPageInfo, NULL); + true, extractPageInfo, NULL); if (!extract_isok) break; @@ -359,7 +359,7 @@ validate_backup_wal_from_start_to_stop(pgBackup *backup, got_endpoint = RunXLogThreads(archivedir, 0, InvalidTransactionId, InvalidXLogRecPtr, tli, xlog_seg_size, backup->start_lsn, backup->stop_lsn, - false, NULL, NULL); + true, NULL, NULL); if (!got_endpoint) { @@ -1161,6 +1161,11 @@ XLogThreadWorker(void *arg) elog(ERROR, "Thread [%d]: out of memory", reader_data->thread_num); xlogreader->system_identifier = instance_config.system_identifier; + elog(LOG, "Thread [%d]: Starting LSN: %X/%X , end: %X/%X", + reader_data->thread_num, + (uint32) (thread_arg->startpoint >> 32), (uint32) (thread_arg->startpoint), + (uint32) (thread_arg->endpoint >> 32), (uint32) (thread_arg->endpoint)); + found = XLogFindNextRecord(xlogreader, thread_arg->startpoint); /* @@ -1190,11 +1195,6 @@ XLogThreadWorker(void *arg) thread_arg->startpoint = found; - elog(VERBOSE, "Thread 
[%d]: Starting LSN: %X/%X", - reader_data->thread_num, - (uint32) (thread_arg->startpoint >> 32), - (uint32) (thread_arg->startpoint)); - while (need_read) { XLogRecord *record; From ed609d8a3c7259178e0e12ff5eb964697c35316e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 26 Dec 2022 12:22:30 +0300 Subject: [PATCH 321/339] again test_backup_with_less_privileges_role Try to delay switch_wal_segment to stabilize --- tests/backup_test.py | 31 ++++++++++++++++--------------- tests/helpers/ptrack_helpers.py | 21 ++++++++++++++++++--- 2 files changed, 34 insertions(+), 18 deletions(-) diff --git a/tests/backup_test.py b/tests/backup_test.py index 59b16c7ac..c894c865f 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -2203,9 +2203,10 @@ def test_backup_with_less_privileges_role(self): # self.switch_wal_segment(node) # self.switch_wal_segment(node) - self.backup_node( - backup_dir, 'replica', replica, - datname='backupdb', options=['-U', 'backup']) + with self.switch_wal_after(node, 10): + self.backup_node( + backup_dir, 'replica', replica, + datname='backupdb', options=['-U', 'backup']) # stream full backup from replica self.backup_node( @@ -2215,30 +2216,30 @@ def test_backup_with_less_privileges_role(self): # self.switch_wal_segment(node) # PAGE backup from replica - self.switch_wal_segment(node) - self.backup_node( - backup_dir, 'replica', replica, backup_type='page', - datname='backupdb', options=['-U', 'backup', '--archive-timeout=20s']) + with self.switch_wal_after(node, 10): + self.backup_node( + backup_dir, 'replica', replica, backup_type='page', + datname='backupdb', options=['-U', 'backup', '--archive-timeout=20s']) self.backup_node( backup_dir, 'replica', replica, backup_type='page', datname='backupdb', options=['--stream', '-U', 'backup']) # DELTA backup from replica - self.switch_wal_segment(node) - self.backup_node( - backup_dir, 'replica', replica, backup_type='delta', - datname='backupdb', options=['-U', 'backup']) + with 
self.switch_wal_after(node, 10): + self.backup_node( + backup_dir, 'replica', replica, backup_type='delta', + datname='backupdb', options=['-U', 'backup']) self.backup_node( backup_dir, 'replica', replica, backup_type='delta', datname='backupdb', options=['--stream', '-U', 'backup']) # PTRACK backup from replica if self.ptrack: - self.switch_wal_segment(node) - self.backup_node( - backup_dir, 'replica', replica, backup_type='ptrack', - datname='backupdb', options=['-U', 'backup']) + with self.switch_wal_after(node, 10): + self.backup_node( + backup_dir, 'replica', replica, backup_type='ptrack', + datname='backupdb', options=['-U', 'backup']) self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 936b55458..37d806255 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1,6 +1,7 @@ # you need os for unittest to work import os import gc +import threading import unittest from sys import exit, argv, version_info import signal @@ -16,6 +17,7 @@ import re import json import random +import contextlib idx_ptrack = { 't_heap': { @@ -1653,7 +1655,7 @@ def version_to_num(self, version): num = num * 100 + int(re.sub(r"[^\d]", "", part)) return num - def switch_wal_segment(self, node): + def switch_wal_segment(self, node, sleep_seconds=1): """ Execute pg_switch_wal() in given node @@ -1661,11 +1663,24 @@ def switch_wal_segment(self, node): node: an instance of PostgresNode or NodeConnection class """ if isinstance(node, testgres.PostgresNode): - node.safe_psql('postgres', 'select pg_switch_wal()') + with node.connect('postgres') as con: + con.execute('select txid_current()') + con.execute('select pg_switch_wal()') else: node.execute('select pg_switch_wal()') - sleep(1) + if sleep_seconds > 0: + sleep(sleep_seconds) + + @contextlib.contextmanager + def switch_wal_after(self, node, 
seconds): + tm = threading.Timer(seconds, self.switch_wal_segment, [node, 0]) + tm.start() + try: + yield + finally: + tm.cancel() + tm.join() def wait_until_replica_catch_with_master(self, master, replica): From 0cf3d050f639e8a42bc2e5a433e0cb790e397c30 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 26 Dec 2022 12:28:17 +0300 Subject: [PATCH 322/339] [PBCKP-434] make stop_backup_lsn volatile. Well, it is not really enough. We must use atomic. Lets do it later. --- src/pg_probackup.h | 1 - src/stream.c | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 24dd660a7..78ff15ab0 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -1194,7 +1194,6 @@ extern void datapagemap_print_debug(datapagemap_t *map); /* in stream.c */ -extern XLogRecPtr stop_backup_lsn; extern void start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOptions *conn_opt, XLogRecPtr startpos, TimeLineID starttli, diff --git a/src/stream.c b/src/stream.c index f00c28bbb..22395e878 100644 --- a/src/stream.c +++ b/src/stream.c @@ -26,7 +26,8 @@ static int standby_message_timeout = 10 * 1000; /* stop_backup_lsn is set by pg_stop_backup() to stop streaming */ -XLogRecPtr stop_backup_lsn = InvalidXLogRecPtr; +/* TODO: use atomic */ +static volatile XLogRecPtr stop_backup_lsn = InvalidXLogRecPtr; static XLogRecPtr stop_stream_lsn = InvalidXLogRecPtr; /* From dab39571f2ab5bec949b328f14d88052c688968a Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 26 Dec 2022 13:27:33 +0300 Subject: [PATCH 323/339] copy-paste some test fixes authors: Victoria Shepard and Sergey Fukanchik --- tests/helpers/ptrack_helpers.py | 17 ++++++++++------- tests/ptrack_test.py | 1 - 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 37d806255..b683ea231 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1,18 +1,14 
@@ # you need os for unittest to work import os -import gc import threading import unittest -from sys import exit, argv, version_info +from sys import exit, argv import signal import subprocess import shutil -import six import testgres import hashlib -import re import getpass -import select from time import sleep import re import json @@ -143,6 +139,7 @@ def __init__(self, message, cmd): def __str__(self): return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd) + class PostgresNodeExtended(testgres.PostgresNode): def __init__(self, base_dir=None, *args, **kwargs): @@ -227,6 +224,7 @@ def table_checksum(self, table, dbname="postgres"): con.close() return sum.hexdigest() + class ProbackupTest(object): # Class attributes enterprise = is_enterprise() @@ -238,8 +236,13 @@ def __init__(self, *args, **kwargs): self.nodes_to_cleanup = [] if isinstance(self, unittest.TestCase): - self.module_name = self.id().split('.')[1] - self.fname = self.id().split('.')[3] + try: + self.module_name = self.id().split('.')[-2] + self.fname = self.id().split('.')[-1] + except IndexError: + print("Couldn't get module name and function name from self.id(): `{}`".format(self.id())) + self.module_name = self.module_name if self.module_name else str(self).split('(')[1].split('.')[1] + self.fname = str(self).split('(')[0] if '-v' in argv or '--verbose' in argv: self.verbose = True diff --git a/tests/ptrack_test.py b/tests/ptrack_test.py index 709f64163..e24ddfe47 100644 --- a/tests/ptrack_test.py +++ b/tests/ptrack_test.py @@ -14,7 +14,6 @@ class PtrackTest(ProbackupTest, unittest.TestCase): def setUp(self): if self.pg_config_version < self.version_to_num('11.0'): self.skipTest('You need PostgreSQL >= 11 for this test') - self.fname = self.id().split('.')[3] # @unittest.skip("skip") def test_drop_rel_during_backup_ptrack(self): From 03e5dce7680cbf34a1801fd7de0ae849c7fe8b35 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 27 Dec 2022 08:56:35 +0300 Subject: [PATCH 324/339] 
fix pioLocalReadFile_pioRead --- src/utils/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/utils/file.c b/src/utils/file.c index 714d05fa9..835f21e22 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2850,7 +2850,7 @@ pioLocalReadFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) ft_bytes_move(&buf, &self->remain); - while (buf.len && $noerr(*err)) + while (buf.len) { ft_assert(self->remain.len == 0); @@ -2860,7 +2860,7 @@ pioLocalReadFile_pioRead(VSelf, ft_bytes_t buf, err_i *err) if (r < 0) *err = $syserr(errno, "Cannot read from {path:q}", path(self->path.ptr)); - else if (r == 0) + if (r <= 0) break; if (to_read.ptr == buf.ptr) From cb19d6a24b86bdd0ecc45ff5091b3f0e001bb2d2 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 27 Dec 2022 13:19:25 +0300 Subject: [PATCH 325/339] fix replica_tests stop_lsn_null_offset --- tests/helpers/ptrack_helpers.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index b683ea231..6949609f5 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1658,7 +1658,7 @@ def version_to_num(self, version): num = num * 100 + int(re.sub(r"[^\d]", "", part)) return num - def switch_wal_segment(self, node, sleep_seconds=1): + def switch_wal_segment(self, node, sleep_seconds=1, and_tx=False): """ Execute pg_switch_wal() in given node @@ -1667,7 +1667,8 @@ def switch_wal_segment(self, node, sleep_seconds=1): """ if isinstance(node, testgres.PostgresNode): with node.connect('postgres') as con: - con.execute('select txid_current()') + if and_tx: + con.execute('select txid_current()') con.execute('select pg_switch_wal()') else: node.execute('select pg_switch_wal()') @@ -1677,7 +1678,7 @@ def switch_wal_segment(self, node, sleep_seconds=1): @contextlib.contextmanager def switch_wal_after(self, node, seconds): - tm = threading.Timer(seconds, self.switch_wal_segment, [node, 0]) + tm = 
threading.Timer(seconds, self.switch_wal_segment, [node, 0, True]) tm.start() try: yield From f5c0980d1cd4eb3e1766895e13842f55c510d035 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 27 Dec 2022 13:32:45 +0300 Subject: [PATCH 326/339] try to fix test_archive_replica_null_offset --- tests/helpers/ptrack_helpers.py | 4 ++-- tests/replica_test.py | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 6949609f5..7fbcc7820 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1677,8 +1677,8 @@ def switch_wal_segment(self, node, sleep_seconds=1, and_tx=False): sleep(sleep_seconds) @contextlib.contextmanager - def switch_wal_after(self, node, seconds): - tm = threading.Timer(seconds, self.switch_wal_segment, [node, 0, True]) + def switch_wal_after(self, node, seconds, and_tx=True): + tm = threading.Timer(seconds, self.switch_wal_segment, [node, 0, and_tx]) tm.start() try: yield diff --git a/tests/replica_test.py b/tests/replica_test.py index 297ebc97d..e044e027c 100644 --- a/tests/replica_test.py +++ b/tests/replica_test.py @@ -751,17 +751,17 @@ def test_archive_replica_null_offset(self): replica.slow_start(replica=True) - self.switch_wal_segment(master) self.switch_wal_segment(master) - # take backup from replica - output = self.backup_node( - backup_dir, 'node', replica, replica.data_dir, - options=[ - '--archive-timeout=30', - '--log-level-console=LOG', - '--no-validate'], - return_id=False) + with self.switch_wal_after(master, 10): + # take backup from replica + output = self.backup_node( + backup_dir, 'node', replica, replica.data_dir, + options=[ + '--archive-timeout=30', + '--log-level-console=LOG', + '--no-validate'], + return_id=False) self.assertIn( 'LOG: Looking for LSN 0/4000000 in segment: 000000010000000000000003', From f8b480e7c68ed87ce338112161e1a9f1eedcab73 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 
27 Dec 2022 14:44:41 +0300 Subject: [PATCH 327/339] try to fix test_archive_get_batching_sanity --- tests/archive_test.py | 31 +++++++++++++++++-------------- tests/helpers/ptrack_helpers.py | 18 ++++++++++++++++++ 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/tests/archive_test.py b/tests/archive_test.py index b26f58909..737b935bd 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -3,6 +3,7 @@ import gzip import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, GdbException +from .helpers.ptrack_helpers import tail_file from datetime import datetime, timedelta import subprocess from sys import exit @@ -2204,7 +2205,7 @@ def test_archive_get_prefetch_corruption(self): self.backup_node(backup_dir, 'node', node, options=['--stream']) - node.pgbench_init(scale=50) + node.pgbench_init(scale=20) replica = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'replica')) @@ -2251,7 +2252,7 @@ def test_archive_get_prefetch_corruption(self): # generate WAL, copy it into prefetch directory, then corrupt # some segment - node.pgbench_init(scale=20) + node.pgbench_init(scale=5) sleep(20) # now copy WAL files into prefetch directory and corrupt some of them @@ -2304,18 +2305,20 @@ def test_archive_get_prefetch_corruption(self): os.remove(os.path.join(replica.logs_dir, 'postgresql.log')) replica.slow_start(replica=True) - sleep(60) - - with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() - - self.assertIn( - 'Prefetched WAL segment {0} is invalid, cannot use it'.format(filename), - postgres_log_content) - - self.assertIn( - 'LOG: restored log file "{0}" from archive'.format(filename), - postgres_log_content) + prefetch_line = 'Prefetched WAL segment {0} is invalid, cannot use it'.format(filename) + has_prefetch_line = False + restored_line = 'LOG: restored log file "{0}" from archive'.format(filename) + has_restored_line = False + for line in 
tail_file(os.path.join(replica.logs_dir, 'postgresql.log')): + if not has_prefetch_line: + has_prefetch_line = prefetch_line in line + if not has_restored_line: + has_restored_line = restored_line in line + if has_prefetch_line and has_restored_line: + break + + self.assertTrue(has_prefetch_line) + self.assertTrue(has_restored_line) # @unittest.skip("skip") def test_archive_show_partial_files_handling(self): diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 7fbcc7820..6f23f8804 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -10,6 +10,7 @@ import hashlib import getpass from time import sleep +from time import time import re import json import random @@ -130,6 +131,23 @@ def base36enc(number): return sign + base36 +def tail_file(file, linetimeout=10, totaltimeout = 60): + start = time() + with open(file, 'r') as f: + waits = 0 + while waits < linetimeout: + line = f.readline() + if line == '': + waits += 1 + sleep(1) + continue + waits = 0 + yield line + if time() - start > totaltimeout: + raise TimeoutError("total timeout tailing %s"%(file,)) + else: + return # ok + raise TimeoutError("line timeout tailing %s"%(file,)) class ProbackupException(Exception): def __init__(self, message, cmd): From 77fc12ac878f7103a2cb9897596c04dcab5e1ace Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 27 Dec 2022 14:50:31 +0300 Subject: [PATCH 328/339] gdb kill did quit --- tests/helpers/ptrack_helpers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 6f23f8804..6b38a0ac1 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -2070,6 +2070,7 @@ def get_line(self): return line def kill(self): + self._did_quit = True self.proc.kill() self.proc.wait(3) self.proc.stdin.close() From f38ac068723c750481c7bf80a64e573b9f038dbd Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 28 Dec 2022 09:54:53 +0300 
Subject: [PATCH 329/339] reset timeouts in test_archive_replica_not_null_offset --- tests/replica_test.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/replica_test.py b/tests/replica_test.py index e044e027c..622647c3a 100644 --- a/tests/replica_test.py +++ b/tests/replica_test.py @@ -786,6 +786,7 @@ def test_archive_replica_not_null_offset(self): set_replication=True, initdb_params=['--data-checksums'], pg_options={ + 'archive_timeout' : '10s', 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) @@ -812,14 +813,14 @@ def test_archive_replica_not_null_offset(self): self.backup_node( backup_dir, 'node', replica, replica.data_dir, options=[ - '--archive-timeout=10', + '--archive-timeout=30', '--no-validate'], return_id=False) output = self.backup_node( backup_dir, 'node', replica, replica.data_dir, options=[ - '--archive-timeout=10', + '--archive-timeout=30', '--log-level-console=LOG', '--no-validate'], return_id=False) From b2cf9afb0d5ba2c63df75999f82c8516e0307d94 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 28 Dec 2022 12:31:28 +0300 Subject: [PATCH 330/339] @test_needs_gdb - make function decorator - check for decorator in GDBObj - check for linux's ptrace --- tests/archive_test.py | 6 ++--- tests/backup_test.py | 19 +++++++------- tests/checkdb_test.py | 5 ++-- tests/delta_test.py | 3 ++- tests/helpers/ptrack_helpers.py | 45 ++++++++++++++++++++++++++++----- tests/locking_test.py | 19 +++++++------- tests/logging_test.py | 3 ++- tests/merge_test.py | 33 ++++++++++++------------ tests/pgpro2068_test.py | 3 ++- tests/ptrack_test.py | 9 ++++--- tests/replica_test.py | 11 ++++---- tests/restore_test.py | 5 ++-- tests/retention_test.py | 13 +++++----- tests/validate_test.py | 5 ++-- 14 files changed, 111 insertions(+), 68 deletions(-) diff --git a/tests/archive_test.py b/tests/archive_test.py index 737b935bd..3a5679e37 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -3,7 +3,7 @@ import gzip import unittest 
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, GdbException -from .helpers.ptrack_helpers import tail_file +from .helpers.ptrack_helpers import tail_file, test_needs_gdb from datetime import datetime, timedelta import subprocess from sys import exit @@ -205,12 +205,12 @@ def test_pgpro434_2(self): 'data after restore not equal to original data') # @unittest.skip("skip") + @test_needs_gdb def test_pgpro434_3(self): """ Check pg_stop_backup_timeout, needed backup_timeout Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -262,12 +262,12 @@ def test_pgpro434_3(self): 'PostgreSQL crashed because of a failed assert') # @unittest.skip("skip") + @test_needs_gdb def test_pgpro434_4(self): """ Check pg_stop_backup_timeout, libpq-timeout requested. Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( diff --git a/tests/backup_test.py b/tests/backup_test.py index c894c865f..b604db844 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -3,6 +3,7 @@ import re from time import sleep, time from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import test_needs_gdb import shutil from distutils.dir_util import copy_tree from testgres import ProcessType, QueryException @@ -1090,9 +1091,9 @@ def test_tablespace_handling_2(self): repr(e.message), self.cmd)) # @unittest.skip("skip") + @test_needs_gdb def test_drop_rel_during_full_backup(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1233,9 +1234,9 @@ def test_drop_db_during_full_backup(self): 
self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @test_needs_gdb def test_drop_rel_during_backup_delta(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1300,9 +1301,9 @@ def test_drop_rel_during_backup_delta(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @test_needs_gdb def test_drop_rel_during_backup_page(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1419,9 +1420,9 @@ def test_basic_temp_slot_for_stream_backup(self): options=['--stream', '--slot=slot_1', '--temp-slot']) # @unittest.skip("skip") + @test_needs_gdb def test_backup_concurrent_drop_table(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1547,9 +1548,9 @@ def test_pg_11_adjusted_wal_segment_size(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @test_needs_gdb def test_sigint_handling(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1584,9 +1585,9 @@ def test_sigint_handling(self): 'Backup STATUS should be "ERROR"') # @unittest.skip("skip") + @test_needs_gdb def test_sigterm_handling(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1620,9 +1621,9 @@ def test_sigterm_handling(self): 'Backup STATUS should be "ERROR"') # @unittest.skip("skip") + @test_needs_gdb def test_sigquit_handling(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = 
self.make_simple_node( @@ -2724,9 +2725,9 @@ def test_incr_backup_filenode_map(self): 'select 1') # @unittest.skip("skip") + @test_needs_gdb def test_missing_wal_segment(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -3056,9 +3057,9 @@ def test_basic_backup_default_transaction_read_only(self): self.backup_node(backup_dir, 'node', node, backup_type='page') # @unittest.skip("skip") + @test_needs_gdb def test_backup_atexit(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( diff --git a/tests/checkdb_test.py b/tests/checkdb_test.py index b8fb59483..92244f345 100644 --- a/tests/checkdb_test.py +++ b/tests/checkdb_test.py @@ -1,6 +1,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import test_needs_gdb from datetime import datetime, timedelta import subprocess from testgres import QueryException @@ -12,9 +13,9 @@ class CheckdbTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") + @test_needs_gdb def test_checkdb_amcheck_only_sanity(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -549,9 +550,9 @@ def test_checkdb_checkunique(self): node.stop() # @unittest.skip("skip") + @test_needs_gdb def test_checkdb_sigint_handling(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( diff --git a/tests/delta_test.py b/tests/delta_test.py index 8736a079c..ab7064305 100644 --- a/tests/delta_test.py +++ b/tests/delta_test.py @@ -1,6 +1,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from 
.helpers.ptrack_helpers import test_needs_gdb from datetime import datetime, timedelta from testgres import QueryException import subprocess @@ -438,12 +439,12 @@ def test_delta_multiple_segments(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @test_needs_gdb def test_delta_vacuum_full(self): """ make node, make full and delta stream backups, restore them and check data correctness """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 6b38a0ac1..38ae64c8e 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1,5 +1,6 @@ # you need os for unittest to work import os +import sys import threading import unittest from sys import exit, argv @@ -292,8 +293,7 @@ def __init__(self, *args, **kwargs): self.test_env['LC_MESSAGES'] = 'C' self.test_env['LC_TIME'] = 'C' - self.gdb = 'PGPROBACKUP_GDB' in self.test_env and \ - self.test_env['PGPROBACKUP_GDB'] == 'ON' + self._set_gdb() self.paranoia = 'PG_PROBACKUP_PARANOIA' in self.test_env and \ self.test_env['PG_PROBACKUP_PARANOIA'] == 'ON' @@ -422,6 +422,20 @@ def __init__(self, *args, **kwargs): os.environ["PGAPPNAME"] = "pg_probackup" + def _set_gdb(self): + self._gdb_enabled = self.test_env.get('PGPROBACKUP_GDB') == 'ON' + self._gdb_ok = self._gdb_enabled + if not self._gdb_enabled or sys.platform != 'linux': + return + try: + with open('/proc/sys/kernel/yama/ptrace_scope') as f: + ptrace = f.read() + except FileNotFoundError: + self._gdb_ptrace_ok = True + return + self._gdb_ptrace_ok = int(ptrace) == 0 + self._gdb_ok = self._gdb_ok and self._gdb_ptrace_ok + def is_test_result_ok(test_case): # sources of solution: # 1. 
python versions 2.7 - 3.10, verified on 3.10, 3.7, 2.7, taken from: @@ -1979,12 +1993,26 @@ def gdb_attach(self, pid): return GDBobj([str(pid)], self, attach=True) def _check_gdb_flag_or_skip_test(self): - if not self.gdb: + if not self._gdb_enabled: self.skipTest( "Specify PGPROBACKUP_GDB and build without " "optimizations for run this test" ) + if self._gdb_ok: + return + if not self._gdb_ptrace_ok: + self.fail("set /proc/sys/kernel/yama/ptrace_scope to 0" + " to run GDB tests") + else: + self.fail("use of gdb is not possible") +def test_needs_gdb(func): + def wrapped(self): + self._gdb_decorated = True + self._check_gdb_flag_or_skip_test() + func(self) + wrapped.__doc__ = func.__doc__ + return wrapped class GdbException(Exception): def __init__(self, message="False"): @@ -2001,10 +2029,13 @@ def __init__(self, cmd, env, attach=False): self._did_quit = False # Check gdb flag is set up - if not env.gdb: - raise GdbException("No `PGPROBACKUP_GDB=on` is set, " - "test should call ProbackupTest::check_gdb_flag_or_skip_test() on its start " - "and be skipped") + if not getattr(env, "_gdb_decorated", False): + raise GdbException("Test should be marked with @test_needs_gdb") + if not env._gdb_enabled: + raise GdbException("No `PGPROBACKUP_GDB=on` is set.") + if not env._gdb_ok: + raise GdbException("No gdb usage possible.") + # Check gdb presense try: gdb_version, _ = subprocess.Popen( diff --git a/tests/locking_test.py b/tests/locking_test.py index 5367c2610..fe5093b92 100644 --- a/tests/locking_test.py +++ b/tests/locking_test.py @@ -2,19 +2,20 @@ import os from time import sleep from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import test_needs_gdb class LockingTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure + @test_needs_gdb def test_locking_running_validate_1(self): """ make node, take full backup, stop it in the middle run validate, expect it to successfully executed, 
concurrent RUNNING backup with pid file and active process is legal """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -61,6 +62,7 @@ def test_locking_running_validate_1(self): # Clean after yourself gdb.kill() + @test_needs_gdb def test_locking_running_validate_2(self): """ make node, take full backup, stop it in the middle, @@ -69,7 +71,6 @@ def test_locking_running_validate_2(self): RUNNING backup with pid file AND without active pid is legal, but his status must be changed to ERROR and pid file is deleted """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -130,6 +131,7 @@ def test_locking_running_validate_2(self): # Clean after yourself gdb.kill() + @test_needs_gdb def test_locking_running_validate_2_specific_id(self): """ make node, take full backup, stop it in the middle, @@ -139,7 +141,6 @@ def test_locking_running_validate_2_specific_id(self): RUNNING backup with pid file AND without active pid is legal, but his status must be changed to ERROR and pid file is deleted """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -229,6 +230,7 @@ def test_locking_running_validate_2_specific_id(self): # Clean after yourself gdb.kill() + @test_needs_gdb def test_locking_running_3(self): """ make node, take full backup, stop it in the middle, @@ -237,7 +239,6 @@ def test_locking_running_3(self): RUNNING backup without pid file AND without active pid is legal, his status must be changed to ERROR """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -299,6 +300,7 @@ def test_locking_running_3(self): # Clean after yourself gdb.kill() + @test_needs_gdb def test_locking_restore_locked(self): """ make node, take full backup, take two page backups, 
@@ -307,7 +309,6 @@ def test_locking_restore_locked(self): Expect restore to sucseed because read-only locks do not conflict """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -341,6 +342,7 @@ def test_locking_restore_locked(self): # Clean after yourself gdb.kill() + @test_needs_gdb def test_concurrent_delete_and_restore(self): """ make node, take full backup, take page backup, @@ -349,7 +351,6 @@ def test_concurrent_delete_and_restore(self): Expect restore to fail because validation of intermediate backup is impossible """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -398,13 +399,13 @@ def test_concurrent_delete_and_restore(self): # Clean after yourself gdb.kill() + @test_needs_gdb def test_locking_concurrent_validate_and_backup(self): """ make node, take full backup, launch validate and stop it in the middle, take page backup. Expect PAGE backup to be successfully executed """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -434,13 +435,13 @@ def test_locking_concurrent_validate_and_backup(self): # Clean after yourself gdb.kill() + @test_needs_gdb def test_locking_concurren_restore_and_delete(self): """ make node, take full backup, launch restore and stop it in the middle, delete full backup. Expect it to fail. 
""" - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -572,11 +573,11 @@ def test_empty_lock_file(self): # p1.wait() # p2.wait() + @test_needs_gdb def test_shared_lock(self): """ Make sure that shared lock leaves no files with pids """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), diff --git a/tests/logging_test.py b/tests/logging_test.py index c5cdfa344..9623804fb 100644 --- a/tests/logging_test.py +++ b/tests/logging_test.py @@ -1,6 +1,7 @@ import unittest import os from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import test_needs_gdb import datetime class LogTest(ProbackupTest, unittest.TestCase): @@ -8,10 +9,10 @@ class LogTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure # PGPRO-2154 + @test_needs_gdb def test_log_rotation(self): """ """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), diff --git a/tests/merge_test.py b/tests/merge_test.py index eb57463fe..6fcca5ab3 100644 --- a/tests/merge_test.py +++ b/tests/merge_test.py @@ -3,6 +3,7 @@ import unittest import os from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import test_needs_gdb from testgres import QueryException import shutil from datetime import datetime, timedelta @@ -914,11 +915,11 @@ def test_merge_delta_delete(self): node_restored.slow_start() # @unittest.skip("skip") + @test_needs_gdb def test_continue_failed_merge(self): """ Check that failed MERGE can be continued """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -988,11 +989,11 @@ def test_continue_failed_merge(self): self.restore_node(backup_dir, 'node', 
node) # @unittest.skip("skip") + @test_needs_gdb def test_continue_failed_merge_with_corrupted_delta_backup(self): """ Fail merge via gdb, corrupt DELTA backup, try to continue merge """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1083,11 +1084,11 @@ def test_continue_failed_merge_with_corrupted_delta_backup(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) + @test_needs_gdb def test_continue_failed_merge_2(self): """ Check that failed MERGE on delete can be continued """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1156,12 +1157,12 @@ def test_continue_failed_merge_2(self): # Try to continue failed MERGE self.merge_backup(backup_dir, "node", backup_id) + @test_needs_gdb def test_continue_failed_merge_3(self): """ Check that failed MERGE cannot be continued if intermediate backup is missing. 
""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1338,12 +1339,12 @@ def test_merge_different_wal_modes(self): self.assertEqual( 'STREAM', self.show_pb(backup_dir, 'node', backup_id)['wal']) + @test_needs_gdb def test_crash_after_opening_backup_control_1(self): """ check that crashing after opening backup.control for writing will not result in losing backup metadata """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1388,13 +1389,13 @@ def test_crash_after_opening_backup_control_1(self): 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) # @unittest.skip("skip") + @test_needs_gdb def test_crash_after_opening_backup_control_2(self): """ check that crashing after opening backup_content.control for writing will not result in losing metadata about backup files TODO: rewrite """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1478,13 +1479,13 @@ def test_crash_after_opening_backup_control_2(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @test_needs_gdb def test_losing_file_after_failed_merge(self): """ check that crashing after opening backup_content.control for writing will not result in losing metadata about backup files TODO: rewrite """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1567,10 +1568,10 @@ def test_losing_file_after_failed_merge(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) + @test_needs_gdb def test_failed_merge_after_delete(self): """ """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') node = self.make_simple_node( @@ -1648,10 +1649,10 @@ def test_failed_merge_after_delete(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) + @test_needs_gdb def test_failed_merge_after_delete_1(self): """ """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1724,10 +1725,10 @@ def test_failed_merge_after_delete_1(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) + @test_needs_gdb def test_failed_merge_after_delete_2(self): """ """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -1788,10 +1789,10 @@ def test_failed_merge_after_delete_2(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) + @test_needs_gdb def test_failed_merge_after_delete_3(self): """ """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -2199,10 +2200,10 @@ def test_smart_merge(self): with open(logfile, 'r') as f: logfile_content = f.read() + @test_needs_gdb def test_idempotent_merge(self): """ """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -2483,12 +2484,12 @@ def test_multi_timeline_merge(self): # @unittest.skip("skip") # @unittest.expectedFailure + @test_needs_gdb def test_merge_page_header_map_retry(self): """ page header map cannot be trusted when running retry """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -2529,10 +2530,10 @@ def test_merge_page_header_map_retry(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + 
@test_needs_gdb def test_missing_data_file(self): """ """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -2586,10 +2587,10 @@ def test_missing_data_file(self): logfile_content) # @unittest.skip("skip") + @test_needs_gdb def test_missing_non_data_file(self): """ """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -2642,10 +2643,10 @@ def test_missing_non_data_file(self): 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) # @unittest.skip("skip") + @test_needs_gdb def test_merge_remote_mode(self): """ """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( diff --git a/tests/pgpro2068_test.py b/tests/pgpro2068_test.py index bad1653b3..5572732e1 100644 --- a/tests/pgpro2068_test.py +++ b/tests/pgpro2068_test.py @@ -1,6 +1,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack +from .helpers.ptrack_helpers import test_needs_gdb from datetime import datetime, timedelta import subprocess from time import sleep @@ -11,11 +12,11 @@ class BugTest(ProbackupTest, unittest.TestCase): + @test_needs_gdb def test_minrecpoint_on_replica(self): """ https://jira.postgrespro.ru/browse/PGPRO-2068 """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), diff --git a/tests/ptrack_test.py b/tests/ptrack_test.py index e24ddfe47..f79c967c7 100644 --- a/tests/ptrack_test.py +++ b/tests/ptrack_test.py @@ -1,6 +1,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack +from .helpers.ptrack_helpers import test_needs_gdb from datetime import datetime, timedelta import subprocess from testgres import 
QueryException, StartNodeException @@ -16,11 +17,11 @@ def setUp(self): self.skipTest('You need PostgreSQL >= 11 for this test') # @unittest.skip("skip") + @test_needs_gdb def test_drop_rel_during_backup_ptrack(self): """ drop relation during ptrack backup """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -745,10 +746,10 @@ def test_ptrack_uncommitted_xact(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") - def test_ptrack_vacuum_full(self): + @test_needs_gdb + def test_ptrack_vacuum_full_1(self): """make node, make full and ptrack stream backups, restore them and check data correctness""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -909,12 +910,12 @@ def test_ptrack_vacuum_truncate(self): node_restored.slow_start() # @unittest.skip("skip") + @test_needs_gdb def test_ptrack_get_block(self): """ make node, make full and ptrack stream backups, restore them and check data correctness """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( diff --git a/tests/replica_test.py b/tests/replica_test.py index 622647c3a..206c250ce 100644 --- a/tests/replica_test.py +++ b/tests/replica_test.py @@ -2,6 +2,7 @@ import threading import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack +from .helpers.ptrack_helpers import test_needs_gdb from datetime import datetime, timedelta import subprocess import time @@ -476,12 +477,12 @@ def test_take_backup_from_delayed_replica(self): pgbench.wait() # @unittest.skip("skip") + @test_needs_gdb def test_replica_promote(self): """ start backup from replica, during backup promote replica check that backup is failed """ - self._check_gdb_flag_or_skip_test() backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( @@ -561,10 +562,10 @@ def test_replica_promote(self): log_content) # @unittest.skip("skip") + @test_needs_gdb def test_replica_stop_lsn_null_offset(self): """ """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( @@ -623,10 +624,10 @@ def test_replica_stop_lsn_null_offset(self): gdb_checkpointer.kill() # @unittest.skip("skip") + @test_needs_gdb def test_replica_stop_lsn_null_offset_next_record(self): """ """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( @@ -714,10 +715,10 @@ def test_replica_stop_lsn_null_offset_next_record(self): self.assertTrue(self.show_pb(backup_dir, 'replica')[0]['status'] == 'DONE') # @unittest.skip("skip") + @test_needs_gdb def test_archive_replica_null_offset(self): """ """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( @@ -836,12 +837,12 @@ def test_archive_replica_not_null_offset(self): "\n CMD: {0}".format(self.cmd)) # @unittest.skip("skip") + @test_needs_gdb def test_replica_toast(self): """ make archive master, take full and page archive backups from master, set replica, make archive backup from replica """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( diff --git a/tests/restore_test.py b/tests/restore_test.py index 62e48e3ac..34ce11782 100644 --- a/tests/restore_test.py +++ b/tests/restore_test.py @@ -1,6 +1,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import test_needs_gdb import subprocess import sys from time import sleep @@ -2242,9 +2243,9 
@@ def test_pg_11_group_access(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") + @test_needs_gdb def test_restore_concurrent_drop_table(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -3568,9 +3569,9 @@ def test_truncate_postgresql_auto_conf(self): self.assertTrue(os.path.exists(auto_path)) # @unittest.skip("skip") + @test_needs_gdb def test_concurrent_restore(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( diff --git a/tests/retention_test.py b/tests/retention_test.py index cafaf88ec..029c097f4 100644 --- a/tests/retention_test.py +++ b/tests/retention_test.py @@ -2,6 +2,7 @@ import unittest from datetime import datetime, timedelta from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import test_needs_gdb from time import sleep from distutils.dir_util import copy_tree @@ -1437,6 +1438,7 @@ def test_window_error_backups(self): # self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') # @unittest.skip("skip") + @test_needs_gdb def test_window_error_backups_1(self): """ DELTA @@ -1444,7 +1446,6 @@ def test_window_error_backups_1(self): FULL -------window """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -1483,6 +1484,7 @@ def test_window_error_backups_1(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) # @unittest.skip("skip") + @test_needs_gdb def test_window_error_backups_2(self): """ DELTA @@ -1490,7 +1492,6 @@ def test_window_error_backups_2(self): FULL -------window """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -1524,9 +1525,9 @@ def 
test_window_error_backups_2(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) + @test_needs_gdb def test_retention_redundancy_overlapping_chains(self): """""" - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -1566,9 +1567,9 @@ def test_retention_redundancy_overlapping_chains(self): self.validate_pb(backup_dir, 'node') + @test_needs_gdb def test_retention_redundancy_overlapping_chains_1(self): """""" - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -1662,11 +1663,11 @@ def test_wal_purge_victim(self): e.message) # @unittest.skip("skip") + @test_needs_gdb def test_failed_merge_redundancy_retention(self): """ Check that retention purge works correctly with MERGING backups """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( @@ -2440,11 +2441,11 @@ def test_basic_wal_depth(self): self.validate_pb(backup_dir, 'node') + @test_needs_gdb def test_concurrent_running_full_backup(self): """ https://github.com/postgrespro/pg_probackup/issues/328 """ - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( diff --git a/tests/validate_test.py b/tests/validate_test.py index 15c58df3d..ecfdfd8c2 100644 --- a/tests/validate_test.py +++ b/tests/validate_test.py @@ -1,6 +1,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.ptrack_helpers import test_needs_gdb from datetime import datetime, timedelta from pathlib import Path import subprocess @@ -1046,11 +1047,11 @@ def test_validate_instance_with_several_corrupt_backups(self): 'Backup STATUS should be "OK"') # @unittest.skip("skip") + @test_needs_gdb def 
test_validate_instance_with_several_corrupt_backups_interrupt(self): """ check that interrupt during validation is handled correctly """ - self._check_gdb_flag_or_skip_test() node = self.make_simple_node( base_dir=os.path.join(self.module_name, self.fname, 'node'), @@ -3428,9 +3429,9 @@ def test_corrupt_pg_control_via_resetxlog(self): repr(e.message), self.cmd)) # @unittest.skip("skip") + @test_needs_gdb def test_validation_after_backup(self): """""" - self._check_gdb_flag_or_skip_test() backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( From cf10582d6a7d34111368e7309eaa8c75175fb70b Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 28 Dec 2022 13:30:04 +0300 Subject: [PATCH 331/339] [PBCKP-423] remove redundant ptrack warning --- tests/archive_test.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/archive_test.py b/tests/archive_test.py index 3a5679e37..5bfd14373 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -1929,6 +1929,13 @@ def test_archive_push_sanity(self): with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: replica_log_content = f.read() + # PBCKP-423 - need to clean ptrack warning + ptrack_is_not = 'Ptrack 1.X is not supported anymore' + if ptrack_is_not in replica_log_content: + lines = [line for line in replica_log_content.splitlines() + if ptrack_is_not not in line] + replica_log_content = "".join(lines) + # make sure that .partial file is not compressed self.assertNotIn('.partial.gz', replica_log_content) # make sure that .history file is not compressed From 7b587f75f02126d9f5838305a5c007e195726b4b Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 28 Dec 2022 13:44:16 +0300 Subject: [PATCH 332/339] [PBCKP-423] and again --- tests/archive_test.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/tests/archive_test.py b/tests/archive_test.py index 5bfd14373..d886602f8 100644 --- 
a/tests/archive_test.py +++ b/tests/archive_test.py @@ -1898,7 +1898,7 @@ def test_archive_push_sanity(self): self.backup_node(backup_dir, 'node', node) with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() + postgres_log_content = cleanup_ptrack(f.read()) # print(postgres_log_content) # make sure that .backup file is not compressed @@ -1927,14 +1927,7 @@ def test_archive_push_sanity(self): replica.pgbench_init(scale=10) with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - replica_log_content = f.read() - - # PBCKP-423 - need to clean ptrack warning - ptrack_is_not = 'Ptrack 1.X is not supported anymore' - if ptrack_is_not in replica_log_content: - lines = [line for line in replica_log_content.splitlines() - if ptrack_is_not not in line] - replica_log_content = "".join(lines) + replica_log_content = cleanup_ptrack(f.read()) # make sure that .partial file is not compressed self.assertNotIn('.partial.gz', replica_log_content) @@ -2519,6 +2512,15 @@ def test_archive_empty_history_file(self): 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000004.history')), log_content) +def cleanup_ptrack(log_content): + # PBCKP-423 - need to clean ptrack warning + ptrack_is_not = 'Ptrack 1.X is not supported anymore' + if ptrack_is_not in log_content: + lines = [line for line in log_content.splitlines() + if ptrack_is_not not in line] + log_content = "".join(lines) + return log_content + # TODO test with multiple not archived segments. # TODO corrupted file in archive. 
From 0edd95862f28a730e318ab3cd92d719d555a1fc4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 29 Dec 2022 01:37:24 +0300 Subject: [PATCH 333/339] s/test_needs_gdb/need_gdb/ fixtures plugin fails to fill 'func' fixture for 'test_needs_gdb' :-( --- tests/archive_test.py | 6 +++--- tests/backup_test.py | 20 +++++++++---------- tests/checkdb_test.py | 6 +++--- tests/delta_test.py | 4 ++-- tests/helpers/ptrack_helpers.py | 4 ++-- tests/locking_test.py | 20 +++++++++---------- tests/logging_test.py | 4 ++-- tests/merge_test.py | 34 ++++++++++++++++----------------- tests/pgpro2068_test.py | 4 ++-- tests/ptrack_test.py | 8 ++++---- tests/replica_test.py | 12 ++++++------ tests/restore_test.py | 6 +++--- tests/retention_test.py | 14 +++++++------- tests/validate_test.py | 6 +++--- 14 files changed, 74 insertions(+), 74 deletions(-) diff --git a/tests/archive_test.py b/tests/archive_test.py index d886602f8..d2fc4d3d8 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -3,7 +3,7 @@ import gzip import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, GdbException -from .helpers.ptrack_helpers import tail_file, test_needs_gdb +from .helpers.ptrack_helpers import tail_file, needs_gdb from datetime import datetime, timedelta import subprocess from sys import exit @@ -205,7 +205,7 @@ def test_pgpro434_2(self): 'data after restore not equal to original data') # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_pgpro434_3(self): """ Check pg_stop_backup_timeout, needed backup_timeout @@ -262,7 +262,7 @@ def test_pgpro434_3(self): 'PostgreSQL crashed because of a failed assert') # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_pgpro434_4(self): """ Check pg_stop_backup_timeout, libpq-timeout requested. 
diff --git a/tests/backup_test.py b/tests/backup_test.py index b604db844..eb799b937 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -3,7 +3,7 @@ import re from time import sleep, time from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException -from .helpers.ptrack_helpers import test_needs_gdb +from .helpers.ptrack_helpers import needs_gdb import shutil from distutils.dir_util import copy_tree from testgres import ProcessType, QueryException @@ -1091,7 +1091,7 @@ def test_tablespace_handling_2(self): repr(e.message), self.cmd)) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_drop_rel_during_full_backup(self): """""" @@ -1234,7 +1234,7 @@ def test_drop_db_during_full_backup(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_drop_rel_during_backup_delta(self): """""" @@ -1301,7 +1301,7 @@ def test_drop_rel_during_backup_delta(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_drop_rel_during_backup_page(self): """""" @@ -1420,7 +1420,7 @@ def test_basic_temp_slot_for_stream_backup(self): options=['--stream', '--slot=slot_1', '--temp-slot']) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_backup_concurrent_drop_table(self): """""" @@ -1548,7 +1548,7 @@ def test_pg_11_adjusted_wal_segment_size(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_sigint_handling(self): """""" @@ -1585,7 +1585,7 @@ def test_sigint_handling(self): 'Backup STATUS should be "ERROR"') # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_sigterm_handling(self): """""" @@ -1621,7 +1621,7 @@ def test_sigterm_handling(self): 'Backup STATUS should be "ERROR"') # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_sigquit_handling(self): """""" @@ -2725,7 +2725,7 @@ def test_incr_backup_filenode_map(self): 
'select 1') # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_missing_wal_segment(self): """""" @@ -3057,7 +3057,7 @@ def test_basic_backup_default_transaction_read_only(self): self.backup_node(backup_dir, 'node', node, backup_type='page') # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_backup_atexit(self): """""" diff --git a/tests/checkdb_test.py b/tests/checkdb_test.py index 92244f345..c94f15c75 100644 --- a/tests/checkdb_test.py +++ b/tests/checkdb_test.py @@ -1,7 +1,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.ptrack_helpers import test_needs_gdb +from .helpers.ptrack_helpers import needs_gdb from datetime import datetime, timedelta import subprocess from testgres import QueryException @@ -13,7 +13,7 @@ class CheckdbTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_checkdb_amcheck_only_sanity(self): """""" @@ -550,7 +550,7 @@ def test_checkdb_checkunique(self): node.stop() # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_checkdb_sigint_handling(self): """""" diff --git a/tests/delta_test.py b/tests/delta_test.py index ab7064305..5e02c96e8 100644 --- a/tests/delta_test.py +++ b/tests/delta_test.py @@ -1,7 +1,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.ptrack_helpers import test_needs_gdb +from .helpers.ptrack_helpers import needs_gdb from datetime import datetime, timedelta from testgres import QueryException import subprocess @@ -439,7 +439,7 @@ def test_delta_multiple_segments(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_delta_vacuum_full(self): """ make node, make full and delta stream backups, diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 38ae64c8e..5e4ec9b03 100644 --- a/tests/helpers/ptrack_helpers.py +++ 
b/tests/helpers/ptrack_helpers.py @@ -2006,7 +2006,7 @@ def _check_gdb_flag_or_skip_test(self): else: self.fail("use of gdb is not possible") -def test_needs_gdb(func): +def needs_gdb(func): def wrapped(self): self._gdb_decorated = True self._check_gdb_flag_or_skip_test() @@ -2030,7 +2030,7 @@ def __init__(self, cmd, env, attach=False): # Check gdb flag is set up if not getattr(env, "_gdb_decorated", False): - raise GdbException("Test should be marked with @test_needs_gdb") + raise GdbException("Test should be marked with @needs_gdb") if not env._gdb_enabled: raise GdbException("No `PGPROBACKUP_GDB=on` is set.") if not env._gdb_ok: diff --git a/tests/locking_test.py b/tests/locking_test.py index fe5093b92..f2740a6e6 100644 --- a/tests/locking_test.py +++ b/tests/locking_test.py @@ -2,14 +2,14 @@ import os from time import sleep from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.ptrack_helpers import test_needs_gdb +from .helpers.ptrack_helpers import needs_gdb class LockingTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure - @test_needs_gdb + @needs_gdb def test_locking_running_validate_1(self): """ make node, take full backup, stop it in the middle @@ -62,7 +62,7 @@ def test_locking_running_validate_1(self): # Clean after yourself gdb.kill() - @test_needs_gdb + @needs_gdb def test_locking_running_validate_2(self): """ make node, take full backup, stop it in the middle, @@ -131,7 +131,7 @@ def test_locking_running_validate_2(self): # Clean after yourself gdb.kill() - @test_needs_gdb + @needs_gdb def test_locking_running_validate_2_specific_id(self): """ make node, take full backup, stop it in the middle, @@ -230,7 +230,7 @@ def test_locking_running_validate_2_specific_id(self): # Clean after yourself gdb.kill() - @test_needs_gdb + @needs_gdb def test_locking_running_3(self): """ make node, take full backup, stop it in the middle, @@ -300,7 +300,7 @@ def test_locking_running_3(self): # Clean 
after yourself gdb.kill() - @test_needs_gdb + @needs_gdb def test_locking_restore_locked(self): """ make node, take full backup, take two page backups, @@ -342,7 +342,7 @@ def test_locking_restore_locked(self): # Clean after yourself gdb.kill() - @test_needs_gdb + @needs_gdb def test_concurrent_delete_and_restore(self): """ make node, take full backup, take page backup, @@ -399,7 +399,7 @@ def test_concurrent_delete_and_restore(self): # Clean after yourself gdb.kill() - @test_needs_gdb + @needs_gdb def test_locking_concurrent_validate_and_backup(self): """ make node, take full backup, launch validate @@ -435,7 +435,7 @@ def test_locking_concurrent_validate_and_backup(self): # Clean after yourself gdb.kill() - @test_needs_gdb + @needs_gdb def test_locking_concurren_restore_and_delete(self): """ make node, take full backup, launch restore @@ -573,7 +573,7 @@ def test_empty_lock_file(self): # p1.wait() # p2.wait() - @test_needs_gdb + @needs_gdb def test_shared_lock(self): """ Make sure that shared lock leaves no files with pids diff --git a/tests/logging_test.py b/tests/logging_test.py index 9623804fb..998c92797 100644 --- a/tests/logging_test.py +++ b/tests/logging_test.py @@ -1,7 +1,7 @@ import unittest import os from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.ptrack_helpers import test_needs_gdb +from .helpers.ptrack_helpers import needs_gdb import datetime class LogTest(ProbackupTest, unittest.TestCase): @@ -9,7 +9,7 @@ class LogTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure # PGPRO-2154 - @test_needs_gdb + @needs_gdb def test_log_rotation(self): """ """ diff --git a/tests/merge_test.py b/tests/merge_test.py index 6fcca5ab3..fddaeb6a3 100644 --- a/tests/merge_test.py +++ b/tests/merge_test.py @@ -3,7 +3,7 @@ import unittest import os from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.ptrack_helpers import test_needs_gdb +from .helpers.ptrack_helpers 
import needs_gdb from testgres import QueryException import shutil from datetime import datetime, timedelta @@ -915,7 +915,7 @@ def test_merge_delta_delete(self): node_restored.slow_start() # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_continue_failed_merge(self): """ Check that failed MERGE can be continued @@ -989,7 +989,7 @@ def test_continue_failed_merge(self): self.restore_node(backup_dir, 'node', node) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_continue_failed_merge_with_corrupted_delta_backup(self): """ Fail merge via gdb, corrupt DELTA backup, try to continue merge @@ -1084,7 +1084,7 @@ def test_continue_failed_merge_with_corrupted_delta_backup(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - @test_needs_gdb + @needs_gdb def test_continue_failed_merge_2(self): """ Check that failed MERGE on delete can be continued @@ -1157,7 +1157,7 @@ def test_continue_failed_merge_2(self): # Try to continue failed MERGE self.merge_backup(backup_dir, "node", backup_id) - @test_needs_gdb + @needs_gdb def test_continue_failed_merge_3(self): """ Check that failed MERGE cannot be continued if intermediate @@ -1339,7 +1339,7 @@ def test_merge_different_wal_modes(self): self.assertEqual( 'STREAM', self.show_pb(backup_dir, 'node', backup_id)['wal']) - @test_needs_gdb + @needs_gdb def test_crash_after_opening_backup_control_1(self): """ check that crashing after opening backup.control @@ -1389,7 +1389,7 @@ def test_crash_after_opening_backup_control_1(self): 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_crash_after_opening_backup_control_2(self): """ check that crashing after opening backup_content.control @@ -1479,7 +1479,7 @@ def test_crash_after_opening_backup_control_2(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def 
test_losing_file_after_failed_merge(self): """ check that crashing after opening backup_content.control @@ -1568,7 +1568,7 @@ def test_losing_file_after_failed_merge(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - @test_needs_gdb + @needs_gdb def test_failed_merge_after_delete(self): """ """ @@ -1649,7 +1649,7 @@ def test_failed_merge_after_delete(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - @test_needs_gdb + @needs_gdb def test_failed_merge_after_delete_1(self): """ """ @@ -1725,7 +1725,7 @@ def test_failed_merge_after_delete_1(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - @test_needs_gdb + @needs_gdb def test_failed_merge_after_delete_2(self): """ """ @@ -1789,7 +1789,7 @@ def test_failed_merge_after_delete_2(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - @test_needs_gdb + @needs_gdb def test_failed_merge_after_delete_3(self): """ """ @@ -2200,7 +2200,7 @@ def test_smart_merge(self): with open(logfile, 'r') as f: logfile_content = f.read() - @test_needs_gdb + @needs_gdb def test_idempotent_merge(self): """ """ @@ -2484,7 +2484,7 @@ def test_multi_timeline_merge(self): # @unittest.skip("skip") # @unittest.expectedFailure - @test_needs_gdb + @needs_gdb def test_merge_page_header_map_retry(self): """ page header map cannot be trusted when @@ -2530,7 +2530,7 @@ def test_merge_page_header_map_retry(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_missing_data_file(self): """ """ @@ -2587,7 +2587,7 @@ def test_missing_data_file(self): logfile_content) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_missing_non_data_file(self): """ """ @@ -2643,7 +2643,7 @@ def test_missing_non_data_file(self): 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) # @unittest.skip("skip") - @test_needs_gdb + 
@needs_gdb def test_merge_remote_mode(self): """ """ diff --git a/tests/pgpro2068_test.py b/tests/pgpro2068_test.py index 5572732e1..4582d419b 100644 --- a/tests/pgpro2068_test.py +++ b/tests/pgpro2068_test.py @@ -1,7 +1,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from .helpers.ptrack_helpers import test_needs_gdb +from .helpers.ptrack_helpers import needs_gdb from datetime import datetime, timedelta import subprocess from time import sleep @@ -12,7 +12,7 @@ class BugTest(ProbackupTest, unittest.TestCase): - @test_needs_gdb + @needs_gdb def test_minrecpoint_on_replica(self): """ https://jira.postgrespro.ru/browse/PGPRO-2068 diff --git a/tests/ptrack_test.py b/tests/ptrack_test.py index f79c967c7..4597954f1 100644 --- a/tests/ptrack_test.py +++ b/tests/ptrack_test.py @@ -1,7 +1,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from .helpers.ptrack_helpers import test_needs_gdb +from .helpers.ptrack_helpers import needs_gdb from datetime import datetime, timedelta import subprocess from testgres import QueryException, StartNodeException @@ -17,7 +17,7 @@ def setUp(self): self.skipTest('You need PostgreSQL >= 11 for this test') # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_drop_rel_during_backup_ptrack(self): """ drop relation during ptrack backup @@ -746,7 +746,7 @@ def test_ptrack_uncommitted_xact(self): self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_ptrack_vacuum_full_1(self): """make node, make full and ptrack stream backups, restore them and check data correctness""" @@ -910,7 +910,7 @@ def test_ptrack_vacuum_truncate(self): node_restored.slow_start() # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_ptrack_get_block(self): """ make node, make full and ptrack stream backups, diff --git a/tests/replica_test.py b/tests/replica_test.py 
index 206c250ce..dc70917fa 100644 --- a/tests/replica_test.py +++ b/tests/replica_test.py @@ -2,7 +2,7 @@ import threading import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack -from .helpers.ptrack_helpers import test_needs_gdb +from .helpers.ptrack_helpers import needs_gdb from datetime import datetime, timedelta import subprocess import time @@ -477,7 +477,7 @@ def test_take_backup_from_delayed_replica(self): pgbench.wait() # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_replica_promote(self): """ start backup from replica, during backup promote replica @@ -562,7 +562,7 @@ def test_replica_promote(self): log_content) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_replica_stop_lsn_null_offset(self): """ """ @@ -624,7 +624,7 @@ def test_replica_stop_lsn_null_offset(self): gdb_checkpointer.kill() # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_replica_stop_lsn_null_offset_next_record(self): """ """ @@ -715,7 +715,7 @@ def test_replica_stop_lsn_null_offset_next_record(self): self.assertTrue(self.show_pb(backup_dir, 'replica')[0]['status'] == 'DONE') # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_archive_replica_null_offset(self): """ """ @@ -837,7 +837,7 @@ def test_archive_replica_not_null_offset(self): "\n CMD: {0}".format(self.cmd)) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_replica_toast(self): """ make archive master, take full and page archive backups from master, diff --git a/tests/restore_test.py b/tests/restore_test.py index 34ce11782..8f9a00eeb 100644 --- a/tests/restore_test.py +++ b/tests/restore_test.py @@ -1,7 +1,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.ptrack_helpers import test_needs_gdb +from .helpers.ptrack_helpers import needs_gdb import subprocess import sys from time import sleep @@ -2243,7 +2243,7 @@ def test_pg_11_group_access(self): 
self.compare_pgdata(pgdata, pgdata_restored) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_restore_concurrent_drop_table(self): """""" @@ -3569,7 +3569,7 @@ def test_truncate_postgresql_auto_conf(self): self.assertTrue(os.path.exists(auto_path)) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_concurrent_restore(self): """""" diff --git a/tests/retention_test.py b/tests/retention_test.py index 029c097f4..b9b94ed59 100644 --- a/tests/retention_test.py +++ b/tests/retention_test.py @@ -2,7 +2,7 @@ import unittest from datetime import datetime, timedelta from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.ptrack_helpers import test_needs_gdb +from .helpers.ptrack_helpers import needs_gdb from time import sleep from distutils.dir_util import copy_tree @@ -1438,7 +1438,7 @@ def test_window_error_backups(self): # self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_window_error_backups_1(self): """ DELTA @@ -1484,7 +1484,7 @@ def test_window_error_backups_1(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_window_error_backups_2(self): """ DELTA @@ -1525,7 +1525,7 @@ def test_window_error_backups_2(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) - @test_needs_gdb + @needs_gdb def test_retention_redundancy_overlapping_chains(self): """""" @@ -1567,7 +1567,7 @@ def test_retention_redundancy_overlapping_chains(self): self.validate_pb(backup_dir, 'node') - @test_needs_gdb + @needs_gdb def test_retention_redundancy_overlapping_chains_1(self): """""" @@ -1663,7 +1663,7 @@ def test_wal_purge_victim(self): e.message) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_failed_merge_redundancy_retention(self): """ Check that retention purge works correctly with MERGING backups @@ -2441,7 +2441,7 @@ def 
test_basic_wal_depth(self): self.validate_pb(backup_dir, 'node') - @test_needs_gdb + @needs_gdb def test_concurrent_running_full_backup(self): """ https://github.com/postgrespro/pg_probackup/issues/328 diff --git a/tests/validate_test.py b/tests/validate_test.py index ecfdfd8c2..7c5a34bf2 100644 --- a/tests/validate_test.py +++ b/tests/validate_test.py @@ -1,7 +1,7 @@ import os import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -from .helpers.ptrack_helpers import test_needs_gdb +from .helpers.ptrack_helpers import needs_gdb from datetime import datetime, timedelta from pathlib import Path import subprocess @@ -1047,7 +1047,7 @@ def test_validate_instance_with_several_corrupt_backups(self): 'Backup STATUS should be "OK"') # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_validate_instance_with_several_corrupt_backups_interrupt(self): """ check that interrupt during validation is handled correctly @@ -3429,7 +3429,7 @@ def test_corrupt_pg_control_via_resetxlog(self): repr(e.message), self.cmd)) # @unittest.skip("skip") - @test_needs_gdb + @needs_gdb def test_validation_after_backup(self): """""" From 428d10ddc6c4da1ab8ccffaa005d4f94a5d1bdb0 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 29 Dec 2022 02:56:34 +0300 Subject: [PATCH 334/339] pioDirNext: skip just deleted file --- src/utils/file.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/utils/file.c b/src/utils/file.c index 835f21e22..c9ed0a8f6 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -3135,6 +3135,11 @@ pioLocalDir_pioDirNext(VSelf, err_i* err) join_path_components(path, self->path.ptr, ent->d_name); entry.stat = $i(pioStat, localDrive, path, true, .err = err); + if (getErrno(*err) == ENOENT) + { /* skip just deleted file */ + fobj_reset_err(err); // will be released within outter ARP. 
+ continue; + } if ($haserr(*err)) return entry; From 3dd5219e130529a404b63661853076326c7bff73 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 29 Dec 2022 04:12:45 +0300 Subject: [PATCH 335/339] try to fix test_minrecpoint_on_replica for Pg<=12 --- tests/pgpro2068_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pgpro2068_test.py b/tests/pgpro2068_test.py index 4582d419b..bc8d1c263 100644 --- a/tests/pgpro2068_test.py +++ b/tests/pgpro2068_test.py @@ -86,7 +86,7 @@ def test_minrecpoint_on_replica(self): gdb_checkpointer = self.gdb_attach(checkpointer_pid) gdb_checkpointer._execute('handle SIGINT noprint nostop pass') gdb_checkpointer._execute('handle SIGUSR1 noprint nostop pass') - gdb_checkpointer.set_breakpoint('UpdateLastRemovedPtr') + gdb_checkpointer.set_breakpoint('RemoveOldXlogFiles') gdb_checkpointer.continue_execution_until_break() # break recovery on UpdateControlFile From 361b9a7b51d413023b1c0859f531585838e65e92 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 29 Dec 2022 04:13:36 +0300 Subject: [PATCH 336/339] fix for XLogReader's read_page callback contract --- src/parsexlog.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/parsexlog.c b/src/parsexlog.c index daf827dc9..f8adece24 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -900,6 +900,8 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, memcpy(readBuf, reader_data->page_buf, XLOG_BLCKSZ); #if PG_VERSION_NUM < 130000 *pageTLI = reader_data->tli; +#else + xlogreader->seg.ws_tli = reader_data->tli; #endif return XLOG_BLCKSZ; } @@ -931,6 +933,8 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, reader_data->prev_page_off = targetPageOff; #if PG_VERSION_NUM < 130000 *pageTLI = reader_data->tli; +#else + xlogreader->seg.ws_tli = reader_data->tli; #endif return XLOG_BLCKSZ; } From 649d1adc27660ecdf4a9dbcd84dffa2c274f46e7 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 29 Dec 2022 05:37:39 
+0300 Subject: [PATCH 337/339] [PBCKP-431] refix test_minrecpoint_on_replica in fact there were too many wal_keep_segments set by default in make_simple_node --- tests/helpers/ptrack_helpers.py | 2 +- tests/pgpro2068_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 5e4ec9b03..f5b1903cc 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -557,7 +557,7 @@ def make_simple_node( if node.major_version >= 13: options['wal_keep_size'] = '200MB' else: - options['wal_keep_segments'] = '100' + options['wal_keep_segments'] = '12' # set default values self.set_auto_conf(node, options) diff --git a/tests/pgpro2068_test.py b/tests/pgpro2068_test.py index bc8d1c263..4582d419b 100644 --- a/tests/pgpro2068_test.py +++ b/tests/pgpro2068_test.py @@ -86,7 +86,7 @@ def test_minrecpoint_on_replica(self): gdb_checkpointer = self.gdb_attach(checkpointer_pid) gdb_checkpointer._execute('handle SIGINT noprint nostop pass') gdb_checkpointer._execute('handle SIGUSR1 noprint nostop pass') - gdb_checkpointer.set_breakpoint('RemoveOldXlogFiles') + gdb_checkpointer.set_breakpoint('UpdateLastRemovedPtr') gdb_checkpointer.continue_execution_until_break() # break recovery on UpdateControlFile From 4a9118171178b2f2962e0609392076c73e324894 Mon Sep 17 00:00:00 2001 From: Vyacheslav Makarov Date: Tue, 24 Jan 2023 13:03:02 +0300 Subject: [PATCH 338/339] [PBCKP-247]: typo in the option_get_value function. 
--- src/utils/configuration.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 0d760abac..0be646f68 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -675,7 +675,7 @@ option_get_value(ConfigOption *opt) if (opt->type == 'i') convert_from_base_unit(*((int32 *) opt->var), opt->flags & OPTION_UNIT, &value, &unit); - else if (opt->type == 'i') + else if (opt->type == 'I') convert_from_base_unit(*((int64 *) opt->var), opt->flags & OPTION_UNIT, &value, &unit); else if (opt->type == 'u') From 388251d0799cb8692536d68759d9cbeb684b6b14 Mon Sep 17 00:00:00 2001 From: Vyacheslav Makarov Date: Wed, 1 Feb 2023 08:12:40 +0300 Subject: [PATCH 339/339] [PBCKP-247]: test commit --- src/utils/configuration.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 0be646f68..0d760abac 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -675,7 +675,7 @@ option_get_value(ConfigOption *opt) if (opt->type == 'i') convert_from_base_unit(*((int32 *) opt->var), opt->flags & OPTION_UNIT, &value, &unit); - else if (opt->type == 'I') + else if (opt->type == 'i') convert_from_base_unit(*((int64 *) opt->var), opt->flags & OPTION_UNIT, &value, &unit); else if (opt->type == 'u') pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy