From ccaa46116279bc889d10783034b4d3fbde3aa668 Mon Sep 17 00:00:00 2001 From: zhangwenchao <656540940@qq.com> Date: Thu, 13 Jun 2024 10:49:20 +0800 Subject: [PATCH] Implement COPY TO for directory tables and support COPY FROM/TO PROGRAM for directory tables. In this commit we add the following directory table features: 1. support COPY TO for directory tables, with grammar such as: COPY BINARY DIRECTORY TABLE <dirtable> 'relative_path' TO 'dest_path'; 2. support COPY FROM/TO PROGRAM for directory tables 3. allow CREATE/DROP INDEX on directory tables 4. support download mode in the gpdirtableload tool Authored-by: Zhang Wenchao zwcpostgres@gmail.com --- gpMgmt/bin/gpdirtableload | 155 ++++++- src/backend/commands/copy.c | 15 +- src/backend/commands/copyfrom.c | 106 +++-- src/backend/commands/copyto.c | 410 ++++++++++++++++++ src/backend/commands/dirtablecmds.c | 5 +- src/backend/commands/indexcmds.c | 3 - src/backend/commands/tablecmds.c | 4 +- src/backend/parser/gram.y | 22 +- src/bin/psql/copy.c | 30 ++ src/bin/psql/describe.c | 2 +- src/include/commands/copy.h | 2 + src/include/commands/copyto_internal.h | 1 + src/include/parser/kwlist.h | 2 +- .../input/local_directory_table_mixed.source | 4 + .../output/local_directory_table_mixed.source | 11 + src/test/regress/input/directory_table.source | 63 ++- .../regress/output/directory_table.source | 270 +++++++++--- .../output/directory_table_optimizer.source | 247 ++++++++--- 18 files changed, 1125 insertions(+), 227 deletions(-) diff --git a/gpMgmt/bin/gpdirtableload b/gpMgmt/bin/gpdirtableload index 27a6d2a700e..0b2250e9936 100755 --- a/gpMgmt/bin/gpdirtableload +++ b/gpMgmt/bin/gpdirtableload @@ -67,23 +67,32 @@ def parseargs(): parser.add_argument('--database', '-d', default="gpadmin", help='Database to connect to') - parser.add_argument('--dest-path', help='Path relative to the table root directory') + parser.add_argument('--mode', choices=['upload', 'download'], default="upload", + help='Upload or download file to/from directory table') + parser.add_argument('--dest-path', help='In upload mode, this means path relative to ' + 'the table root directory, while in download ' + 'mode, means directory to download') parser.add_argument('--force-password-auth', default=False, action='store_true', help='Force a password prompt') parser.add_argument('--host', default="localhost", help='Host to connect to') - parser.add_argument('--input-file', help='Input files or directory') + parser.add_argument('--input-file', help='In upload mode, this means input files or ' + 'directory, while in download mode, means ' + 'which directory table to download') parser.add_argument('--logfile', help='Log output to logfile') + parser.add_argument('--tag', help='In download mode, only download the same tag files') + parser.add_argument('--force-write', default=False, action='store_true', + help='In download mode, force write files when files have existed') + parser.add_argument('--port', '-p', type=int, default="5432", help='Port to connect to') parser.add_argument('--stop-on-error', default=False, help='Stop loading files when an error occurs') parser.add_argument('--table', '-t', help='Directory table to load to') - parser.add_argument('--tag', help='Tag name') parser.add_argument('--tasks', '-T', type=int, default="1", help='The maximum number of files that concurrently loads') parser.add_argument('--user', '-U', default="gpadmin", @@ -155,6 +164,7 @@ class gpdirtableload: self.options.qv = self.INFO self.startTimestamp = time.time() self.pool = None + self.upload = True # set default log level if
self.options.verbose is not None: @@ -162,6 +172,13 @@ class gpdirtableload: else: self.options.qv = self.INFO + # set load from/to + if self.options.mode is not None and self.options.mode == 'download': + self.upload = False + + if self.options.dest_path is None: + self.log(self.ERROR, '--dest-path must be set') + # default to gpAdminLogs for a log file, may be overwritten if self.options.logfile is None: self.options.logfile = os.path.join(os.environ.get('HOME', '.'), 'gpAdminLogs') @@ -334,16 +351,32 @@ class gpdirtableload: self.allFiles.append(filepath) self.numFiles = 1 + def collectAllFilesToDownload(self): + self.allFilesToDownload = [] + self.numFiles = 0 + + qry = "SELECT relative_path FROM %s " % self.options.table + + if self.options.tag: + qry += "WHERE tag = \'%s\'" % self.options.tag + + self.allFilesToDownload = [s[0] for s in + self.db.query(qry).getresult()] + self.numFiles = len(self.allFilesToDownload) + def confirmWorkers(self): if self.numFiles < self.options.tasks: self.numWorkers = self.numFiles else: self.numWorkers = self.options.tasks - def startLoadFiles(self): + def startUploadFiles(self): """ - startLoadFiles + startUploadFiles """ + if self.options.input_file is None: + self.log(self.ERROR, '--input-file must be set in upload mode') + self.pool = WorkerPool(numWorkers=self.numWorkers, should_stop=self.options.stop_on_error) srcfile = None @@ -357,7 +390,7 @@ class gpdirtableload: self.log(self.ERROR, 'cannot find greenplum environment ' + 'file: environment misconfigured') - cmdstrbase = "source %s ;" + cmdstrbase = "source %s ;" % srcfile cmdstrbase += "export PGPASSWORD=%s ; psql " % self.options.password @@ -401,13 +434,94 @@ class gpdirtableload: self.pool.haltWork() self.pool.joinWorkers() - def run2(self): + def startDownloadFiles(self): + """ + startDownloadFiles + """ + self.pool = WorkerPool(numWorkers=self.numWorkers, should_stop=self.options.stop_on_error) + + if not self.options.dest_path: + self.log(self.ERROR, 'dest-path is not set.') + if (not os.path.exists(self.options.dest_path)): + self.log(self.ERROR, 'Directory %s does not exist.' % self.options.dest_path) + if (not os.path.isdir(self.options.dest_path)): + self.log(self.ERROR, 'File path %s is not a directory.' 
%self.options.dest_path) + + srcfile = None + if os.environ.get('GPHOME_LOADERS'): + srcfile = os.path.join(os.environ.get('GPHOME_LOADERS'), + 'greenplum_loaders_path.sh') + elif os.environ.get('GPHOME'): + srcfile = os.path.join(os.environ.get('GPHOME'), + 'greenplum_path.sh') + if (not (srcfile and os.path.exists(srcfile))): + self.log(self.ERROR, 'cannot find greenplum environment ' + + 'file: environment misconfigured') + + cmdstrbase = "source %s ;" % srcfile + + cmdstrbase += "export PGPASSWORD=%s ; psql " % self.options.password + + if self.options.database != None: + cmdstrbase += "-d %s " % self.options.database + if self.options.host != None: + cmdstrbase += "-h %s " % self.options.host + if self.options.port != 0: + cmdstrbase += "-p %d " % self.options.port + if self.options.user != None: + cmdstrbase += "-U %s " % self.options.user + + try: + for file in self.allFilesToDownload: + fullpath = self.options.dest_path + '/' + file + if (os.path.exists(fullpath) and not self.options.force_write): + if (not os.path.isdir(fullpath)): + continue + else: + self.log(self.ERROR, 'file directory %s has existed' % fullpath) + + filedir = os.path.dirname(fullpath) + if (not os.path.exists(filedir)): + os.makedirs(filedir, exist_ok=True) + + cmdstr = cmdstrbase + cmdstr += '-c \"copy binary directory table %s \'%s\' to \'%s\' \"' % (self.options.table, file, fullpath) + + cmd = Command(name='download directory table', ctxt=LOCAL, cmdStr=cmdstr) + self.pool.addCommand(cmd) + self.pool.join() + items = self.pool.getCompletedItems() + for i in items: + if not i.was_successful(): + self.log(self.ERROR, 'failed download directory table %s to %s, msg:%s' % + (self.options.table, self.options.dest_path, i.get_results().stderr)) + self.pool.check_results() + except Exception as err: + self.log(self.ERROR, 'errors in job:') + self.log(self.ERROR, err.__str__()) + self.log(self.ERROR, 'exiting early') + finally: + self.pool.haltWork() + self.pool.joinWorkers() + + def run_upload(self): try: start = time.time() self.collectAllFiles() self.confirmWorkers() self.setup_connection() - self.startLoadFiles() + self.startUploadFiles() + self.log(self.INFO, 'running time: %.2f seconds' % (time.time() - start)) + except Exception as e: + raise + + def run_download(self): + try: + start = time.time() + self.setup_connection() + self.collectAllFilesToDownload() + self.confirmWorkers() + self.startDownloadFiles() self.log(self.INFO, 'running time: %.2f seconds' % (time.time() - start)) except Exception as e: raise @@ -422,17 +536,19 @@ class gpdirtableload: signal.signal(signal.SIGHUP, signal.SIG_IGN) try: - try: - self.run2() - except Exception: - traceback.print_exc(file=self.logfile) - self.logfile.flush() - self.exitValue = 2 - if (self.options.qv > self.INFO): - traceback.print_exc() - else: - self.log(self.ERROR, "unexpected error -- backtrace " + - "written to log file") + if self.upload == True: + self.run_upload() + else: + self.run_download() + except (Exception, SystemExit): + traceback.print_exc(file=self.logfile) + self.logfile.flush() + self.exitValue = 2 + if (self.options.qv > self.INFO): + traceback.print_exc() + else: + self.log(self.ERROR, "unexpected error -- backtrace " + + "written to log file") finally: if self.exitValue == 0: self.log(self.INFO, 'gpdirtableload succeeded') @@ -440,6 +556,7 @@ class gpdirtableload: self.log(self.INFO, 'gpdirtableload succeeded with warnings') else: self.log(self.INFO, 'gpdirtableload failed') + os._exit(self.exitValue) if __name__ == '__main__': diff --git 
a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 189eaa6926e..eb43d06fcce 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -472,9 +472,18 @@ DoCopy(ParseState *pstate, const CopyStmt *stmt, */ PG_TRY(); { - cstate = BeginCopyTo(pstate, rel, query, relid, - stmt->filename, stmt->is_program, - stmt->attlist, options); + if (rel && rel->rd_rel->relkind == RELKIND_DIRECTORY_TABLE) + { + cstate = BeginCopyToDirectoryTable(pstate, stmt->filename, stmt->dirfilename, + rel, stmt->is_program, options); + } + + else + { + cstate = BeginCopyTo(pstate, rel, query, relid, + stmt->filename, stmt->is_program, + stmt->attlist, options); + } /* * "copy t to file on segment" CopyDispatchOnSegment diff --git a/src/backend/commands/copyfrom.c b/src/backend/commands/copyfrom.c index 3736921eba4..fda42b3821f 100644 --- a/src/backend/commands/copyfrom.c +++ b/src/backend/commands/copyfrom.c @@ -216,8 +216,8 @@ static void InitCopyFromDispatchSplit(CopyFromState cstate, GpDistributionData * static unsigned int GetTargetSeg(GpDistributionData *distData, TupleTableSlot *slot); static uint64 CopyFromDirectoryTable(CopyFromState cstate); -static CopyFromState BeginCopyFromDirectoryTable(ParseState *pstate, const char *fileName, - Relation rel, List *options); +static CopyFromState BeginCopyFromDirectoryTable(ParseState *pstate, Relation rel, + const char *filename, bool is_program, List *options); /* * No more than this many tuples per CopyMultiInsertBuffer @@ -1220,8 +1220,9 @@ CopyFromDirectoryTable(CopyFromState cstate) */ static CopyFromState BeginCopyFromDirectoryTable(ParseState *pstate, - const char *filename, Relation rel, + const char *filename, + bool is_program, List *options) { CopyFromState cstate; @@ -1356,13 +1357,14 @@ BeginCopyFromDirectoryTable(ParseState *pstate, /* We keep those variables in cstate. */ cstate->in_functions = in_functions; cstate->typioparams = typioparams; - cstate->is_program = false; + cstate->is_program = is_program; pipe = (filename == NULL || cstate->dispatch_mode == COPY_EXECUTOR); if (pipe) { progress_vals[1] = PROGRESS_COPY_TYPE_PIPE; + Assert(!is_program || cstate->dispatch_mode == COPY_EXECUTOR); /* the grammar does not allow this */ if (whereToSendOutput == DestRemote) ReceiveCopyBegin(cstate); else @@ -1370,42 +1372,56 @@ BeginCopyFromDirectoryTable(ParseState *pstate, } else { - struct stat st; - cstate->filename = pstrdup(filename); - progress_vals[1] = PROGRESS_COPY_TYPE_FILE; - cstate->copy_file = AllocateFile(cstate->filename, PG_BINARY_R); - if (cstate->copy_file == NULL) + if (cstate->is_program) { - /* copy errno because ereport subfunctions might change it */ - int save_errno = errno; - - ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not open file \"%s\" for reading: %m", - cstate->filename), - (save_errno == ENOENT || save_errno == EACCES) ? - errhint("COPY FROM instructs the PostgreSQL server process to read a file. " - "You may want a client-side facility such as psql's \\copy.") : 0)); + progress_vals[1] = PROGRESS_COPY_TYPE_PROGRAM; + cstate->program_pipes = open_program_pipes(cstate->filename, false); + cstate->copy_file = fdopen(cstate->program_pipes->pipes[0], PG_BINARY_R); + if (cstate->copy_file == NULL) + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not execute command \"%s\": %m", + cstate->filename))); } + else + { + struct stat st; - // Increase buffer size to improve performance (cmcdevitt) - /* GPDB_14_MERGE_FIXME: Ret value process. 
*/ - setvbuf(cstate->copy_file, NULL, _IOFBF, 393216); // 384 Kbytes + progress_vals[1] = PROGRESS_COPY_TYPE_FILE; + cstate->copy_file = AllocateFile(cstate->filename, PG_BINARY_R); + if (cstate->copy_file == NULL) + { + /* copy errno because ereport subfunctions might change it */ + int save_error = errno; - if (fstat(fileno(cstate->copy_file), &st)) - ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not stat file \"%s\": %m", - cstate->filename))); + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not open file \"%s\" for reading: %m", + cstate->filename), + (save_error == ENOENT || save_error == EACCES) ? + errhint("COPY FROM instructs the PostgreSQL server process to read a file. " + "You may want a client-side facility such as psql's \\copy.") : 0)); + } - if (S_ISDIR(st.st_mode)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is a directory", cstate->filename))); + // Increase buffer size to improve performance (cmcdevitt) + /* GPDB_14_MERGE_FIXME: Ret value process. */ + setvbuf(cstate->copy_file, NULL, _IOFBF, 393216); // 384 Kbytes + + if (fstat(fileno(cstate->copy_file), &st)) + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not stat file \"%s\": %m", + cstate->filename))); + + if (S_ISDIR(st.st_mode)) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("\"%s\" is a directory", cstate->filename))); - progress_vals[2] = st.st_size; + progress_vals[2] = st.st_size; + } } pgstat_progress_update_multi_param(3, progress_cols, progress_vals); @@ -2465,7 +2481,7 @@ BeginCopyFrom(ParseState *pstate, }; if (rel->rd_rel->relkind == RELKIND_DIRECTORY_TABLE) - return BeginCopyFromDirectoryTable(pstate, filename, rel, options); + return BeginCopyFromDirectoryTable(pstate, rel, filename, is_program, options); /* Allocate workspace and zero all fields */ cstate = (CopyFromStateData *) palloc0(sizeof(CopyFromStateData)); @@ -2880,8 +2896,11 @@ BeginCopyFrom(ParseState *pstate, } else if (cstate->opts.binary) { - /* Read and verify binary header */ - ReceiveCopyBinaryHeader(cstate); + if (cstate->rel->rd_rel->relkind != RELKIND_DIRECTORY_TABLE) + { + /* Read and verify binary header */ + ReceiveCopyBinaryHeader(cstate); + } } /* create workspace for CopyReadAttributes results */ @@ -3930,12 +3949,19 @@ static void EndCopyFromDirectoryTable(CopyFromState cstate) { /* No COPY FROM related resources except memory. 
*/ - if (cstate->copy_file && FreeFile(cstate->copy_file)) + if (cstate->is_program) { - ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not close file \"%s\": %m", - cstate->filename))); + close_program_pipes(cstate->program_pipes, true); + } + else + { + if (cstate->copy_file && FreeFile(cstate->copy_file)) + { + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not close file \"%s\": %m", + cstate->filename))); + } } /* Clean up single row error handling related memory */ diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c index 95c9aa48b04..fe4252905f1 100644 --- a/src/backend/commands/copyto.c +++ b/src/backend/commands/copyto.c @@ -25,9 +25,12 @@ #include "access/xact.h" #include "access/xlog.h" #include "catalog/namespace.h" +#include "catalog/pg_directory_table.h" #include "commands/copy.h" #include "commands/copyto_internal.h" #include "commands/progress.h" +#include "common/cryptohash.h" +#include "common/md5.h" #include "executor/execdesc.h" #include "executor/executor.h" #include "executor/tuptable.h" @@ -70,6 +73,8 @@ static uint64 CopyToDispatch(CopyToState cstate); static void CopyToDispatchFlush(CopyToState cstate); static uint64 CopyTo(CopyToState cstate); static void CopySendChar(CopyToState cstate, char c); +static uint64 CopyToDispatchDirectoryTable(CopyToState cstate); +static uint64 CopyToDirectoryTable(CopyToState cstate); /* Low-level communications functions */ static void SendCopyBegin(CopyToState cstate); @@ -1644,6 +1649,9 @@ CopyToDispatch(CopyToState cstate) CdbCopy *cdbCopy; uint64 processed = 0; + if (cstate->rel->rd_rel->relkind == RELKIND_DIRECTORY_TABLE) + return CopyToDispatchDirectoryTable(cstate); + tupDesc = cstate->rel->rd_att; num_phys_attrs = tupDesc->natts; attr_count = list_length(cstate->attnumlist); @@ -1786,6 +1794,93 @@ CopyToDispatch(CopyToState cstate) return processed; } +/* + * Copy FROM directory table TO file, in the dispatcher. Starts a COPY TO command + * on each of the executors and gathers all the results and writes it out. + */ +static uint64 +CopyToDispatchDirectoryTable(CopyToState cstate) +{ + CopyStmt *stmt = glob_copystmt; + TupleDesc tupDesc; + CdbCopy *cdbCopy; + uint64 processed = 0; + + tupDesc = cstate->rel->rd_att; + + cstate->fe_msgbuf = makeStringInfo(); + cdbCopy = makeCdbCopyTo(cstate); + + /* + * Start a COPY command in every db of every segment in Cloudberry Database. + * + * From this point in the code we need to be extra careful + * about error handling. ereport() must not be called until + * the COPY command sessions are closed on the executors. + * Calling ereport() will leave the executors hanging in + * COPY state. + */ + elog(DEBUG5, "COPY command sent to segdbs"); + + PG_TRY(); + { + bool done; + + cdbCopyStart(cdbCopy, stmt, cstate->file_encoding); + + if (!cstate->opts.binary) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Only support copy binary to directory table."))); + + /* + * This is the main work-loop. In here we keep collecting data from the + * COPY commands on the segdbs, until no more data is available. We + * keep writing data out a chunk at a time. + */ + do + { + bool copy_cancel = (QueryCancelPending ? true : false); + + /* get a chunk of data rows from the QE's */ + done = cdbCopyGetData(cdbCopy, copy_cancel, &processed); + + /* send the chunk of data rows to destination (file or stdout) */ + if (cdbCopy->copy_out_buf.len > 0) /* conditional is important! 
*/ + { + /* + * in the dispatcher we receive chunks of file and flush it. + */ + CopySendData(cstate, (void *) cdbCopy->copy_out_buf.data, cdbCopy->copy_out_buf.len); + CopyToDispatchFlush(cstate); + } + } while (!done); + + cdbCopyEnd(cdbCopy, NULL, NULL); + + /* now it's safe to destroy the whole dispatcher state */ + CdbDispatchCopyEnd(cdbCopy); + } + /* catch error */ + PG_CATCH(); + { + MemoryContext oldcontext = MemoryContextSwitchTo(cstate->copycontext); + + cdbCopyAbort(cdbCopy); + + MemoryContextSwitchTo(oldcontext); + PG_RE_THROW(); + } + PG_END_TRY(); + + /* we can throw the error now if QueryCancelPending was set previously */ + CHECK_FOR_INTERRUPTS(); + + pfree(cdbCopy); + + return 1; +} + /* * Copy from relation or query TO file. */ @@ -1797,6 +1892,9 @@ CopyTo(CopyToState cstate) ListCell *cur; uint64 processed = 0; + if (cstate->rel && cstate->rel->rd_rel->relkind == RELKIND_DIRECTORY_TABLE) + return CopyToDirectoryTable(cstate); + if (cstate->rel) tupDesc = RelationGetDescr(cstate->rel); else @@ -1955,6 +2053,139 @@ CopyTo(CopyToState cstate) return processed; } +/* + * Copy directory table To QD. + */ +static uint64 +CopyToDirectoryTable(CopyToState cstate) +{ + uint64 processed = 0; + TupleTableSlot *slot; + TableScanDesc scandesc; + ScanKeyData skey; + Datum datum; + bool isnull; + char *relative_path; + char *scopedFileUrl; + DirectoryTable *dirTable; + UFile *file; + int64_t file_size; + pg_cryptohash_ctx *hashCtx; + char hexMd5Sum[256]; + uint8 md5Sum[MD5_DIGEST_LENGTH]; + char *md5; + + Assert(cstate->rel); + + dirTable = GetDirectoryTable(RelationGetRelid(cstate->rel)); + + /* We use fe_msgbuf as a per-row buffer regardless of copy_dest */ + cstate->fe_msgbuf = makeStringInfo(); + + /* + * Create a temporary memory context that we can reset once per row to + * recover palloc'd memory. This avoids any problems with leaks inside + * datatype output routines, and should be faster than retail pfree's + * anyway. (We don't need a whole econtext as CopyFrom does.) + */ + cstate->rowcontext = AllocSetContextCreate(CurrentMemoryContext, + "COPY TO", + ALLOCSET_DEFAULT_SIZES); + + ScanKeyInit(&skey, + (AttrNumber) 1, + BTEqualStrategyNumber, F_TEXTEQ, + CStringGetTextDatum(cstate->dirfilename)); + scandesc = table_beginscan(cstate->rel, GetActiveSnapshot(), 1, &skey); + slot = table_slot_create(cstate->rel, NULL); + + processed = 0; + if (table_scan_getnextslot(scandesc, ForwardScanDirection, slot)) + { + char buffer[4096]; + char errorMessage[256]; + int bytesRead; + + /* Deconstruct the tuple ... 
*/ + slot_getallattrs(slot); + + datum = slot_getattr(slot, 1, &isnull); + Assert(isnull == false); + relative_path = TextDatumGetCString(datum); + scopedFileUrl = psprintf("%s/%s", dirTable->location, relative_path); + + hashCtx = pg_cryptohash_create(PG_MD5); + if (hashCtx == NULL) + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("failed to create md5hash context: out of memory"))); + pg_cryptohash_init(hashCtx); + + file = UFileOpen(dirTable->spcId, + scopedFileUrl, + O_RDONLY, + errorMessage, + sizeof(errorMessage)); + if (file == NULL) + ereport(ERROR, + (errcode(ERRCODE_IO_ERROR), + errmsg("failed to open file \"%s\": %s", + scopedFileUrl, errorMessage))); + + file_size = UFileSize(file); + if (file_size > MaxAllocSize - VARHDRSZ) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("out of memory"))); + + while (true) + { + bytesRead = UFileRead(file, buffer, sizeof(buffer)); + if (bytesRead == -1) + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("failed to read file \"%s\": %s", scopedFileUrl, UFileGetLastError(file)))); + + if (bytesRead == 0) + break; + + CopySendData(cstate, buffer, bytesRead); + pg_cryptohash_update(hashCtx, (const uint8 *) buffer, bytesRead); + CopySendEndOfRow(cstate); + } + + pg_cryptohash_final(hashCtx, md5Sum, sizeof(md5Sum)); + pg_cryptohash_free(hashCtx); + bytesToHex(md5Sum, hexMd5Sum); + + /* Get md5 from schema table */ + datum = slot_getattr(slot, 4, &isnull); + Assert(isnull == false); + md5 = TextDatumGetCString(datum); + + if (strcmp(md5, hexMd5Sum) != 0) + ereport(ERROR, + (errcode(ERRCODE_IO_ERROR), + errmsg("Copy directory table to file failed, as file content is not consistent."))); + + UFileClose(file); + + /* + * Increment the number of processed tuples, and report the + * progress. + */ + pgstat_progress_update_param(PROGRESS_COPY_TUPLES_PROCESSED, + ++processed); + } + + ExecDropSingleTupleTableSlot(slot); + table_endscan(scandesc); + + MemoryContextDelete(cstate->rowcontext); + + return processed; +} + void CopyOneCustomRowTo(CopyToState cstate, bytea *value) { @@ -1999,3 +2230,182 @@ CopySendChar(CopyToState cstate, char c) { appendStringInfoCharMacro(cstate->fe_msgbuf, c); } + +CopyToState +BeginCopyToDirectoryTable(ParseState *pstate, + const char *filename, + const char *dirfilename, + Relation rel, + bool is_program, + List *options) +{ + CopyToState cstate; + bool pipe; + MemoryContext oldcontext; + const int progress_cols[] = { + PROGRESS_COPY_COMMAND, + PROGRESS_COPY_TYPE + }; + int64 progress_vals[] = { + PROGRESS_COPY_COMMAND_TO, + 0 + }; + + if (!glob_copystmt->dirfilename) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("COPY to directory table must specify the relative_path name."))); + + /* Allocate workspace and zero all fields */ + cstate = (CopyToStateData *) palloc0(sizeof(CopyToStateData)); + + glob_cstate = cstate; + + /* + * We allocate everying used by a cstate in a new memory context. This + * avoids memory leaks during repeated use of COPY in a query. 
+ */ + cstate->copycontext = AllocSetContextCreate(CurrentMemoryContext, + "COPY", + ALLOCSET_DEFAULT_SIZES); + + oldcontext = MemoryContextSwitchTo(cstate->copycontext); + + /* Process the target relation */ + Assert(rel); + cstate->rel = rel; + + /* Check whether copy directory table options allowed */ + ProcessCopyDirectoryTableOptions(pstate, &cstate->opts, true, options, rel->rd_id); + + if (Gp_role == GP_ROLE_DISPATCH && !cstate->opts.binary) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Only support copy binary directory table to."))); + + cstate->file_encoding = GetDatabaseEncoding(); + cstate->need_transcoding = false; + + /* See Multibyte encoding comment above */ + cstate->encoding_embeds_ascii = PG_ENCODING_IS_CLIENT_ONLY(cstate->file_encoding); + + cstate->copy_dest = COPY_FILE; /* default */ + + pipe = (filename == NULL || Gp_role == GP_ROLE_EXECUTE); + + /* Determine the mode */ + if (Gp_role == GP_ROLE_DISPATCH && cstate->opts.on_segment && + cstate->rel && cstate->rel->rd_cdbpolicy) + { + cstate->dispatch_mode = COPY_DISPATCH; + } + else + cstate->dispatch_mode = COPY_DIRECT; + + if (cstate->opts.on_segment && Gp_role == GP_ROLE_DISPATCH) + { + /* in On SEGMENT mode, we don't open anything on the dispatcher. */ + + if (filename == NULL) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("STDOUT is not supported by 'COPY ON SEGMENT'"))); + } + else if (pipe) + { + progress_vals[1] = PROGRESS_COPY_TYPE_PIPE; + cstate->dirfilename = pstrdup(dirfilename); + + /* + * the grammar does not allow this on QD. + * on QE, this could happen + */ + Assert(!is_program || Gp_role == GP_ROLE_EXECUTE); + if (whereToSendOutput != DestRemote) + cstate->copy_file = stdout; + } + else + { + cstate->filename = pstrdup(filename); + cstate->dirfilename = pstrdup(dirfilename); + cstate->is_program = is_program; + + if (is_program) + { + progress_vals[1] = PROGRESS_COPY_TYPE_PROGRAM; + cstate->program_pipes = open_program_pipes(cstate->filename, true); + cstate->copy_file = fdopen(cstate->program_pipes->pipes[EXEC_DATA_P], PG_BINARY_W); + if (cstate->copy_file == NULL) + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not execute command \"%s\": %m", + cstate->filename))); + } + else + { + mode_t oumask; /* Pre-existing umask value */ + struct stat st; + + progress_vals[1] = PROGRESS_COPY_TYPE_FILE; + + /* + * Prevent write to relative path ... too easy to shoot oneself in + * the foot by overwriting a database file ... + */ + if (!is_absolute_path(cstate->filename)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_NAME), + errmsg("relative path not allowed for COPY to file"))); + + oumask = umask(S_IWGRP | S_IWOTH); + PG_TRY(); + { + cstate->copy_file = AllocateFile(cstate->filename, PG_BINARY_W); + } + PG_FINALLY(); + { + umask(oumask); + } + PG_END_TRY(); + if (cstate->copy_file == NULL) + { + /* copy errno because ereport subfunctions might change it */ + int save_errno = errno; + + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not open file \"%s\" for writing: %m", + cstate->filename), + (save_errno == ENOENT || save_errno == EACCES) ? + errhint("COPY TO instructs the PostgreSQL server process to write a file. " + "You may want a client-side facility such as psql's \\copy.") : 0)); + } + + // Increase buffer size to improve performance (cmcdevitt) + /* GPDB_14_MERGE_FIXME: Ret value process. 
*/ + setvbuf(cstate->copy_file, NULL, _IOFBF, 393216); // 384 Kbytes + + if (fstat(fileno(cstate->copy_file), &st)) + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not stat file \"%s\": %m", + cstate->filename))); + + if (S_ISDIR(st.st_mode)) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("\"%s\" is a directory", cstate->filename))); + } + } + /* initialize progress */ + pgstat_progress_start_command(PROGRESS_COMMAND_COPY, + cstate->rel ? RelationGetRelid(cstate->rel) :InvalidOid); + pgstat_progress_update_multi_param(2, progress_cols, progress_vals); + + cstate->bytes_processed = 0; + + MemoryContextSwitchTo(oldcontext); + + return cstate; +} + diff --git a/src/backend/commands/dirtablecmds.c b/src/backend/commands/dirtablecmds.c index 33e7a6cc933..1346f1c8e79 100644 --- a/src/backend/commands/dirtablecmds.c +++ b/src/backend/commands/dirtablecmds.c @@ -168,6 +168,9 @@ CreateDirectoryTable(CreateDirectoryTableStmt *stmt, Oid relId) CatalogTupleInsert(dirRelation, tuple); + /* Add dependency with tablespace */ + recordDependencyOnTablespace(RelationRelationId, relId, spcId); + heap_freetuple(tuple); table_close(dirRelation, RowExclusiveLock); @@ -185,7 +188,7 @@ getFileContent(Oid spcId, char *scopedFileUrl) int bytesRead; bytea *content; UFile *file; - int64 fileSize; + int64_t fileSize; file = UFileOpen(spcId, scopedFileUrl, diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 9066ce3bb40..48140959209 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -705,9 +705,6 @@ DefineIndex(Oid relationId, bool shouldDispatch; Oid blkdirrelid = InvalidOid; - if (RelationIsDirectoryTable(relationId)) - elog(ERROR, "Disallowed to create index on directory table \"%s\".", get_rel_name(relationId)); - shouldDispatch = (Gp_role == GP_ROLE_DISPATCH && ENABLE_DISPATCH() && !IsBootstrapProcessingMode()); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 50913f5cb4f..e43db15714e 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -1790,10 +1790,10 @@ RemoveRelations(DropStmt *drop) if (HeapTupleIsValid(indexTuple)) { index = (Form_pg_index) GETSTRUCT(indexTuple); - if (RelationIsDirectoryTable(index->indrelid)) + if (RelationIsDirectoryTable(index->indrelid) && index->indisprimary) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Disallowed to drop index \"%s\" on directory table \"%s\"", + errmsg("Disallowed to drop primary index \"%s\" on directory table \"%s\"", get_rel_name(index->indexrelid), get_rel_name(index->indrelid)))); ReleaseSysCache(indexTuple); } diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y index ac611cf55af..5b99cc679a7 100644 --- a/src/backend/parser/gram.y +++ b/src/backend/parser/gram.y @@ -4569,6 +4569,26 @@ CopyStmt: COPY opt_binary qualified_name opt_column_list * not fair very well with \' or \( \) chars. 
*/ } + | COPY BINARY DIRECTORY TABLE qualified_name Sconst TO opt_program copy_file_name + { + CopyStmt *n = makeNode(CopyStmt); + n->relation = $5; + n->query = NULL; + n->attlist = NIL; + n->is_from = false; + n->is_program = $8; + n->filename = $9; + n->dirfilename = $6; + n->options = lappend(n->options, makeDefElem("format", (Node *)makeString("binary"), @2)); + + if (n->is_program && n->filename == NULL) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("STDIN/STDOUT not allowed with PROGRAM"), + parser_errposition(@7))); + + $$ = (Node *)n; + } ; copy_from: @@ -19010,7 +19030,6 @@ unreserved_keyword: | DEPTH | DETACH | DICTIONARY - | DIRECTORY | DISABLE_P | DISCARD | DOCUMENT_P @@ -19747,6 +19766,7 @@ reserved_keyword: | DEFAULT | DEFERRABLE | DESC + | DIRECTORY | DISTINCT | DISTRIBUTED /* gp */ | DO diff --git a/src/bin/psql/copy.c b/src/bin/psql/copy.c index 50a48a97259..a9d589539cb 100644 --- a/src/bin/psql/copy.c +++ b/src/bin/psql/copy.c @@ -139,6 +139,26 @@ parse_slash_copy(const char *args) } } + /* Handle COPY DIRECTORY TABLE TO case */ + if (pg_strcasecmp(token, "directory") == 0) + { + xstrcat(&result->before_tofrom, " "); + xstrcat(&result->before_tofrom, token); + token = strtokx(NULL, whitespace, ".,()", "\"", + 0, false, false, pset.encoding); + + if (!token || pg_strcasecmp(token, "table") != 0) + goto error; + + xstrcat(&result->before_tofrom, " "); + xstrcat(&result->before_tofrom, token); + token = strtokx(NULL, whitespace, ".,()", "\"", + 0, false, false, pset.encoding); + + if (!token) + goto error; + } + xstrcat(&result->before_tofrom, " "); xstrcat(&result->before_tofrom, token); token = strtokx(NULL, whitespace, ".,()", "\"", @@ -187,6 +207,16 @@ parse_slash_copy(const char *args) goto error; } + if (token[0] == '\'') + { + xstrcat(&result->before_tofrom, " "); + xstrcat(&result->before_tofrom, token); + token = strtokx(NULL, whitespace, "()", "\"", + 0, false, false, pset.encoding); + if (!token) + goto error; + } + if (pg_strcasecmp(token, "from") == 0) result->from = true; else if (pg_strcasecmp(token, "to") == 0) diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index 5de2cefdb75..3a23859314d 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -2470,7 +2470,7 @@ describeOneTableDetails(const char *schemaname, schemaname, relationname); break; case RELKIND_DIRECTORY_TABLE: - printfPQExpBuffer(&title, _("Directory able \"%s.%s\""), + printfPQExpBuffer(&title, _("Directory table \"%s.%s\""), schemaname, relationname); break; case RELKIND_VIEW: diff --git a/src/include/commands/copy.h b/src/include/commands/copy.h index fb278d1329a..9113eab451d 100644 --- a/src/include/commands/copy.h +++ b/src/include/commands/copy.h @@ -171,6 +171,8 @@ extern void CopyOneRowTo(CopyToState cstate, TupleTableSlot *slot); extern void CopySendEndOfRow(CopyToState cstate); extern void truncateEol(StringInfo buf, EolType eol_type); extern void truncateEolStr(char *str, EolType eol_type); +extern CopyToState BeginCopyToDirectoryTable(ParseState *pstate, const char *filename, const char *dirfilename, + Relation rel, bool is_program, List *options); /* * This is used to hold information about the target's distribution policy, diff --git a/src/include/commands/copyto_internal.h b/src/include/commands/copyto_internal.h index 3f2f6ec0fab..a6ae990015b 100644 --- a/src/include/commands/copyto_internal.h +++ b/src/include/commands/copyto_internal.h @@ -52,6 +52,7 @@ typedef struct CopyToStateData List *attnumlist; /* integer list of attnums to copy 
*/ List *attnamelist; /* list of attributes by name */ char *filename; /* filename, or NULL for STDOUT */ + char *dirfilename; /* filename of directory table, not NULL for copy directory table to */ bool is_program; /* is 'filename' a program to popen? */ CopyFormatOptions opts; diff --git a/src/include/parser/kwlist.h b/src/include/parser/kwlist.h index c18685486a4..ed3551490ee 100644 --- a/src/include/parser/kwlist.h +++ b/src/include/parser/kwlist.h @@ -147,7 +147,7 @@ PG_KEYWORD("depth", DEPTH, UNRESERVED_KEYWORD, BARE_LABEL) PG_KEYWORD("desc", DESC, RESERVED_KEYWORD, BARE_LABEL) PG_KEYWORD("detach", DETACH, UNRESERVED_KEYWORD, BARE_LABEL) PG_KEYWORD("dictionary", DICTIONARY, UNRESERVED_KEYWORD, BARE_LABEL) -PG_KEYWORD("directory", DIRECTORY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("directory", DIRECTORY, RESERVED_KEYWORD, BARE_LABEL) PG_KEYWORD("disable", DISABLE_P, UNRESERVED_KEYWORD, BARE_LABEL) PG_KEYWORD("discard", DISCARD, UNRESERVED_KEYWORD, BARE_LABEL) PG_KEYWORD("distinct", DISTINCT, RESERVED_KEYWORD, BARE_LABEL) diff --git a/src/test/isolation2/input/local_directory_table_mixed.source b/src/test/isolation2/input/local_directory_table_mixed.source index 708cd31ad4d..da810a115b7 100644 --- a/src/test/isolation2/input/local_directory_table_mixed.source +++ b/src/test/isolation2/input/local_directory_table_mixed.source @@ -129,4 +129,8 @@ CREATE EXTENSION IF NOT EXISTS gp_inject_fault; 2: COMMIT; ! rm -rf '@testtablespace@'; +DROP TABLESPACE directory_tblspc; +DROP DIRECTORY TABLE dir_table1; +DROP DIRECTORY TABLE dir_table2; +DROP DIRECTORY TABLE dir_table3; DROP TABLESPACE directory_tblspc; \ No newline at end of file diff --git a/src/test/isolation2/output/local_directory_table_mixed.source b/src/test/isolation2/output/local_directory_table_mixed.source index c42574fd1d6..4674f7ea71e 100644 --- a/src/test/isolation2/output/local_directory_table_mixed.source +++ b/src/test/isolation2/output/local_directory_table_mixed.source @@ -372,5 +372,16 @@ COMMIT ! 
rm -rf '@testtablespace@'; +DROP TABLESPACE directory_tblspc; +ERROR: tablespace "directory_tblspc" cannot be dropped because some objects depend on it +DETAIL: tablespace for directory table dir_table1 +tablespace for directory table dir_table3 +tablespace for directory table dir_table2 +DROP DIRECTORY TABLE dir_table1; +DROP +DROP DIRECTORY TABLE dir_table2; +DROP +DROP DIRECTORY TABLE dir_table3; +DROP DROP TABLESPACE directory_tblspc; DROP diff --git a/src/test/regress/input/directory_table.source b/src/test/regress/input/directory_table.source index 9298ba3c9b9..36ed0d4a307 100644 --- a/src/test/regress/input/directory_table.source +++ b/src/test/regress/input/directory_table.source @@ -238,11 +238,11 @@ SELECT relname, relisshared, relpersistence, relkind FROM pg_class WHERE relname -- Test CREATE/DROP/REINDEX on DIRECTORY SCHEMA TABLE -- Test CREATE INDEX on DIRECTORY SCHEMA TABLE -CREATE INDEX dirtable1_relative_path_idx on dir_table1(relative_path); -- fail -CREATE INDEX dirtable1_size_idx on dir_table1(size); -- fail -CREATE INDEX dirtable1_last_modified_idx on dir_table1(last_modified); -- fail -CREATE INDEX dirtable1_md5_idx on dir_table1(md5); -- fail -CREATE INDEX dirtable1_tag_idx on dir_table1(tag); -- fail +CREATE INDEX dirtable1_relative_path_idx on dir_table1(relative_path); +CREATE INDEX dirtable1_size_idx on dir_table1(size); +CREATE INDEX dirtable1_last_modified_idx on dir_table1(last_modified); +CREATE INDEX dirtable1_md5_idx on dir_table1(md5); +CREATE INDEX dirtable1_tag_idx on dir_table1(tag); \d+ dir_table1; -- Test DROP INDEX on DIRECTORY SCHEMA TABLE @@ -252,6 +252,12 @@ DROP INDEX dir_table3_pkey; -- fail DROP INDEX dir_table4_pkey; -- fail DROP INDEX dir_table5_pkey; -- fail DROP INDEX dir_table6_pkey; -- fail +DROP INDEX dirtable1_relative_path_idx; -- OK +DROP INDEX dirtable1_size_idx; -- OK +DROP INDEX dirtable1_last_modified_idx; -- OK +DROP INDEX dirtable1_md5_idx; -- OK +DROP INDEX dirtable1_tag_idx; -- OK +\d+ dir_table1; -- Test REINDEX on DIRECTORY SCHEMA TABLE REINDEX INDEX dir_table1_pkey; @@ -269,21 +275,20 @@ REINDEX TABLE dir_table5; REINDEX TABLE dir_table6; -- Test triggers -DROP FUNCTION IF EXISTS trigtest; -create function trigtest() returns trigger as $$ +create function triggertest() returns trigger as $$ begin raise notice '% % % %', TG_TABLE_NAME, TG_OP, TG_WHEN, TG_LEVEL; return new; end;$$ language plpgsql; create trigger trigtest_b_row_tg_dirtable_1 before insert or update or delete on dir_table1 -for each row execute procedure trigtest(); +for each row execute procedure triggertest(); create trigger trigtest_a_row_tg_dirtable_1 after insert or update or delete on dir_table1 -for each row execute procedure trigtest(); +for each row execute procedure triggertest(); create trigger trigtest_b_stmt_tg_dirtable_1 before insert or update or delete on dir_table1 -for each statement execute procedure trigtest(); +for each statement execute procedure triggertest(); create trigger trigtest_a_stmt_tg_dirtable_1 after insert or update or delete on dir_table1 -for each statement execute procedure trigtest(); +for each statement execute procedure triggertest(); -- Test COPY DIRECTORY TABLE syntax SELECT relative_path, size, tag FROM dir_table1 ORDER BY 1; @@ -304,17 +309,16 @@ SELECT relative_path, size, tag FROM dir_table2 ORDER BY 1; SELECT relative_path, size, tag FROM dir_table1 ORDER BY 1; SELECT relative_path, content FROM directory_table('dir_table1') ORDER BY 1; -COPY dir_table2 FROM '@abs_srcdir@/data/nation.csv'; -- fail -COPY dir_table2 
FROM '@abs_srcdir@/data/nation.csv' 'nation'; -- fail -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv'; -- fail -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation1'; -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation1'; -- fail -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation2'; -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -- fail -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation2'; -- fail -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation4' WITH TAG 'nation'; -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG 'nation2'; -- fail +COPY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv'; -- fail +COPY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation'; -- fail +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation1'; +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation1'; -- fail +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation2'; +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -- fail +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation2'; -- fail +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation4' WITH TAG 'nation'; +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG 'nation2'; -- fail SELECT relative_path, size, tag FROM dir_table2 ORDER BY 1; SELECT relative_path, content FROM directory_table('dir_table2') ORDER BY 1; @@ -371,7 +375,7 @@ SELECT md5_equal('dir_table2', 'nation2'); SELECT md5_equal('dir_table2', 'nation3'); SELECT md5_equal('dir_table2', 'nation4'); --- Does not support copy to +-- Test Copy To directory table \COPY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail \COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail COPY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail @@ -380,6 +384,17 @@ COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail \COPY BINARY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail COPY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail COPY BINARY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail +\COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail +COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail +\COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO '@abs_srcdir@/data/nation1'; -- OK +COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO '@abs_srcdir@/data/nation1'; -- OK +\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdin; -- OK +COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdin; -- OK +\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdout; -- OK +COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdout; -- OK +\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM 'gzip -c -1 > @abs_srcdir@/data/nation2.gz'; -- OK +COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM 'gzip -c -1 > @abs_srcdir@/data/nation2.gz'; -- OK + SELECT relative_path, size, tag FROM dir_table1 ORDER BY 1; SELECT relative_path, size, tag FROM dir_table2 ORDER BY 1; @@ -596,7 +611,7 
@@ DROP DIRECTORY TABLE IF EXISTS dir_table4; DROP DIRECTORY TABLE IF EXISTS dir_table5; DROP DIRECTORY TABLE IF EXISTS dir_table6; -DROP FUNCTION IF EXISTS trigtest; +DROP FUNCTION IF EXISTS triggertest; DROP STORAGE USER MAPPING IF EXISTS FOR CURRENT_USER STORAGE SERVER oss_server1; DROP STORAGE USER MAPPING IF EXISTS FOR CURRENT_USER STORAGE SERVER oss_server2; diff --git a/src/test/regress/output/directory_table.source b/src/test/regress/output/directory_table.source index 52f5365dd91..d407c797710 100644 --- a/src/test/regress/output/directory_table.source +++ b/src/test/regress/output/directory_table.source @@ -552,7 +552,7 @@ SELECT relname, relisshared, relpersistence, relkind FROM pg_class WHERE relname (12 rows) \d+ dir_table1; - Directory able "public.dir_table1" + Directory table "public.dir_table1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------------+--------------------------+-----------+----------+---------+----------+--------------+------------- relative_path | text | | | | extended | | @@ -565,7 +565,7 @@ Indexes: Distributed by: (relative_path) \d+ dir_table2; - Directory able "public.dir_table2" + Directory table "public.dir_table2" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------------+--------------------------+-----------+----------+---------+----------+--------------+------------- relative_path | text | | | | extended | | @@ -578,7 +578,7 @@ Indexes: Distributed by: (relative_path) \d+ dir_table3; - Directory able "public.dir_table3" + Directory table "public.dir_table3" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------------+--------------------------+-----------+----------+---------+----------+--------------+------------- relative_path | text | | | | extended | | @@ -659,18 +659,13 @@ SELECT relname, relisshared, relpersistence, relkind FROM pg_class WHERE relname \c regression -- Test CREATE/DROP/REINDEX on DIRECTORY SCHEMA TABLE -- Test CREATE INDEX on DIRECTORY SCHEMA TABLE -CREATE INDEX dirtable1_relative_path_idx on dir_table1(relative_path); -- fail -ERROR: Disallowed to create index on directory table "dir_table1". (indexcmds.c:709) -CREATE INDEX dirtable1_size_idx on dir_table1(size); -- fail -ERROR: Disallowed to create index on directory table "dir_table1". (indexcmds.c:709) -CREATE INDEX dirtable1_last_modified_idx on dir_table1(last_modified); -- fail -ERROR: Disallowed to create index on directory table "dir_table1". (indexcmds.c:709) -CREATE INDEX dirtable1_md5_idx on dir_table1(md5); -- fail -ERROR: Disallowed to create index on directory table "dir_table1". (indexcmds.c:709) -CREATE INDEX dirtable1_tag_idx on dir_table1(tag); -- fail -ERROR: Disallowed to create index on directory table "dir_table1". 
(indexcmds.c:709) +CREATE INDEX dirtable1_relative_path_idx on dir_table1(relative_path); +CREATE INDEX dirtable1_size_idx on dir_table1(size); +CREATE INDEX dirtable1_last_modified_idx on dir_table1(last_modified); +CREATE INDEX dirtable1_md5_idx on dir_table1(md5); +CREATE INDEX dirtable1_tag_idx on dir_table1(tag); \d+ dir_table1; - Directory able "public.dir_table1" + Directory table "public.dir_table1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------------+--------------------------+-----------+----------+---------+----------+--------------+------------- relative_path | text | | | | extended | | @@ -680,21 +675,44 @@ ERROR: Disallowed to create index on directory table "dir_table1". (indexcmds.c tag | text | | | | extended | | Indexes: "dir_table1_pkey" PRIMARY KEY, btree (relative_path) + "dirtable1_last_modified_idx" btree (last_modified) + "dirtable1_md5_idx" btree (md5) + "dirtable1_relative_path_idx" btree (relative_path) + "dirtable1_size_idx" btree (size) + "dirtable1_tag_idx" btree (tag) Distributed by: (relative_path) -- Test DROP INDEX on DIRECTORY SCHEMA TABLE DROP INDEX dir_table1_pkey; -- fail -ERROR: Disallowed to drop index "dir_table1_pkey" on directory table "dir_table1" +ERROR: Disallowed to drop primary index "dir_table1_pkey" on directory table "dir_table1" DROP INDEX dir_table2_pkey; -- fail -ERROR: Disallowed to drop index "dir_table2_pkey" on directory table "dir_table2" +ERROR: Disallowed to drop primary index "dir_table2_pkey" on directory table "dir_table2" DROP INDEX dir_table3_pkey; -- fail -ERROR: Disallowed to drop index "dir_table3_pkey" on directory table "dir_table3" +ERROR: Disallowed to drop primary index "dir_table3_pkey" on directory table "dir_table3" DROP INDEX dir_table4_pkey; -- fail ERROR: index "dir_table4_pkey" does not exist DROP INDEX dir_table5_pkey; -- fail ERROR: index "dir_table5_pkey" does not exist DROP INDEX dir_table6_pkey; -- fail -ERROR: Disallowed to drop index "dir_table6_pkey" on directory table "dir_table6" +ERROR: Disallowed to drop primary index "dir_table6_pkey" on directory table "dir_table6" +DROP INDEX dirtable1_relative_path_idx; -- OK +DROP INDEX dirtable1_size_idx; -- OK +DROP INDEX dirtable1_last_modified_idx; -- OK +DROP INDEX dirtable1_md5_idx; -- OK +DROP INDEX dirtable1_tag_idx; -- OK +\d+ dir_table1; + Directory table "public.dir_table1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +---------------+--------------------------+-----------+----------+---------+----------+--------------+------------- + relative_path | text | | | | extended | | + size | bigint | | | | plain | | + last_modified | timestamp with time zone | | | | plain | | + md5 | text | | | | extended | | + tag | text | | | | extended | | +Indexes: + "dir_table1_pkey" PRIMARY KEY, btree (relative_path) +Distributed by: (relative_path) + -- Test REINDEX on DIRECTORY SCHEMA TABLE REINDEX INDEX dir_table1_pkey; REINDEX INDEX dir_table2_pkey; @@ -713,21 +731,20 @@ REINDEX TABLE dir_table5; ERROR: relation "dir_table5" does not exist REINDEX TABLE dir_table6; -- Test triggers -DROP FUNCTION IF EXISTS trigtest; -create function trigtest() returns trigger as $$ +create function triggertest() returns trigger as $$ begin raise notice '% % % %', TG_TABLE_NAME, TG_OP, TG_WHEN, TG_LEVEL; return new; end;$$ language plpgsql; create trigger trigtest_b_row_tg_dirtable_1 before insert or update or delete on dir_table1 -for each row execute procedure trigtest(); +for each row 
execute procedure triggertest(); create trigger trigtest_a_row_tg_dirtable_1 after insert or update or delete on dir_table1 -for each row execute procedure trigtest(); +for each row execute procedure triggertest(); create trigger trigtest_b_stmt_tg_dirtable_1 before insert or update or delete on dir_table1 -for each statement execute procedure trigtest(); +for each statement execute procedure triggertest(); ERROR: Triggers for statements are not yet supported create trigger trigtest_a_stmt_tg_dirtable_1 after insert or update or delete on dir_table1 -for each statement execute procedure trigtest(); +for each statement execute procedure triggertest(); ERROR: Triggers for statements are not yet supported -- Test COPY DIRECTORY TABLE syntax SELECT relative_path, size, tag FROM dir_table1 ORDER BY 1; @@ -747,7 +764,7 @@ ERROR: Only support copy binary from directory table. \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv'; -- fail ERROR: Copy from directory table file name can't be null. \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation1'; -NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=15072) +NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=31193) \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation1'; -- fail ERROR: duplicate key value violates unique constraint "dir_table1_pkey" DETAIL: Key (relative_path)=(nation1) already exists. @@ -756,9 +773,9 @@ ERROR: syntax error at or near "'nation2'" LINE 1: COPY BINARY dir_table1 FROM STDIN 'nation2' 'nation2'; -- fa... ^ \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation2'; -NOTICE: dir_table1 INSERT AFTER ROW (seg2 127.0.1.1:7004 pid=15073) +NOTICE: dir_table1 INSERT AFTER ROW (seg2 127.0.1.1:7004 pid=31192) \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=15072) +NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=31193) \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -- fail ERROR: duplicate key value violates unique constraint "dir_table1_pkey" DETAIL: Key (relative_path)=(nation3) already exists. @@ -766,7 +783,7 @@ DETAIL: Key (relative_path)=(nation3) already exists. ERROR: duplicate key value violates unique constraint "dir_table1_pkey" DETAIL: Key (relative_path)=(nation3) already exists. \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation4' WITH TAG 'nation'; -NOTICE: dir_table1 INSERT AFTER ROW (seg2 127.0.1.1:7004 pid=15073) +NOTICE: dir_table1 INSERT AFTER ROW (seg2 127.0.1.1:7004 pid=31192) \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG 'nation2'; -- fail ERROR: syntax error at or near "WITH" LINE 1: ...dir_table1 FROM STDIN 'nation5' WITH TAG 'nation' WITH TAG '... 
@@ -789,26 +806,24 @@ SELECT relative_path, content FROM directory_table('dir_table1') ORDER BY 1; nation4 | \x307c414c47455249417c307c20686167676c652e206361726566756c6c792066696e616c206465706f736974732064657465637420736c796c7920616761690a317c415247454e54494e417c317c616c20666f7865732070726f6d69736520736c796c79206163636f7264696e6720746f2074686520726567756c6172206163636f756e74732e20626f6c6420726571756573747320616c6f6e0a327c4252415a494c7c317c7920616c6f6e6773696465206f66207468652070656e64696e67206465706f736974732e206361726566756c6c79207370656369616c207061636b61676573206172652061626f7574207468652069726f6e696320666f726765732e20736c796c79207370656369616c200a337c43414e4144417c317c6561732068616e672069726f6e69632c2073696c656e74207061636b616765732e20736c796c7920726567756c6172207061636b616765732061726520667572696f75736c79206f76657220746865207469746865732e20666c756666696c7920626f6c640a347c45475950547c347c792061626f766520746865206361726566756c6c7920756e757375616c207468656f646f6c697465732e2066696e616c206475676f7574732061726520717569636b6c79206163726f73732074686520667572696f75736c7920726567756c617220640a357c455448494f5049417c307c76656e207061636b616765732077616b6520717569636b6c792e20726567750a367c4652414e43457c337c726566756c6c792066696e616c2072657175657374732e20726567756c61722c2069726f6e690a377c4745524d414e597c337c6c20706c6174656c6574732e20726567756c6172206163636f756e747320782d7261793a20756e757375616c2c20726567756c6172206163636f0a387c494e4449417c327c737320657863757365732063616a6f6c6520736c796c79206163726f737320746865207061636b616765732e206465706f73697473207072696e742061726f756e0a397c494e444f4e455349417c327c20736c796c792065787072657373206173796d70746f7465732e20726567756c6172206465706f7369747320686167676c6520736c796c792e206361726566756c6c792069726f6e696320686f636b657920706c617965727320736c65657020626c697468656c792e206361726566756c6c0a31307c4952414e7c347c6566756c6c7920616c6f6e6773696465206f662074686520736c796c792066696e616c20646570656e64656e636965732e200a31317c495241517c347c6e6963206465706f7369747320626f6f73742061746f702074686520717569636b6c792066696e616c2072657175657374733f20717569636b6c7920726567756c610a31327c4a4150414e7c327c6f75736c792e2066696e616c2c20657870726573732067696674732063616a6f6c6520610a31337c4a4f5244414e7c347c6963206465706f736974732061726520626c697468656c792061626f757420746865206361726566756c6c7920726567756c61722070610a31347c4b454e59417c307c2070656e64696e67206578637573657320686167676c6520667572696f75736c79206465706f736974732e2070656e64696e672c20657870726573732070696e746f206265616e732077616b6520666c756666696c79207061737420740a31357c4d4f524f43434f7c307c726e732e20626c697468656c7920626f6c6420636f7572747320616d6f6e672074686520636c6f73656c7920726567756c6172207061636b616765732075736520667572696f75736c7920626f6c6420706c6174656c6574733f0a31367c4d4f5a414d42495155457c307c732e2069726f6e69632c20756e757375616c206173796d70746f7465732077616b6520626c697468656c7920720a31377c504552557c317c706c6174656c6574732e20626c697468656c792070656e64696e6720646570656e64656e636965732075736520666c756666696c79206163726f737320746865206576656e2070696e746f206265616e732e206361726566756c6c792073696c656e74206163636f756e0a31387c4348494e417c327c6320646570656e64656e636965732e20667572696f75736c792065787072657373206e6f746f726e697320736c65657020736c796c7920726567756c6172206163636f756e74732e20696465617320736c6565702e206465706f730a31397c524f4d414e49417c337c756c6172206173796d70746f746573206172652061626f75742074686520667572696f7573206d756c7469706c696572732e206578707265737320646570656e64656e63696573206e61672061626f7665207468652069726f6e6963616c6c79
2069726f6e6963206163636f756e740a32307c5341554449204152414249417c347c74732e2073696c656e7420726571756573747320686167676c652e20636c6f73656c792065787072657373207061636b6167657320736c656570206163726f73732074686520626c697468656c790a32317c564945544e414d7c327c68656c7920656e746963696e676c792065787072657373206163636f756e74732e206576656e2c2066696e616c200a32327c5255535349417c337c20726571756573747320616761696e73742074686520706c6174656c65747320757365206e65766572206163636f7264696e6720746f2074686520717569636b6c7920726567756c61722070696e740a32337c554e49544544204b494e47444f4d7c337c65616e7320626f6f7374206361726566756c6c79207370656369616c2072657175657374732e206163636f756e7473206172652e206361726566756c6c0a32347c554e49544544205354415445537c317c792066696e616c207061636b616765732e20736c6f7720666f7865732063616a6f6c6520717569636b6c792e20717569636b6c792073696c656e7420706c6174656c657473206272656163682069726f6e6963206163636f756e74732e20756e757375616c2070696e746f2062650a (4 rows) -COPY dir_table2 FROM '@abs_srcdir@/data/nation.csv'; -- fail +COPY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv'; -- fail ERROR: Copy from directory table file name can't be null. -COPY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation'; -- fail +COPY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation'; -- fail ERROR: Only support copy binary from directory table. -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv'; -- fail -ERROR: Copy from directory table file name can't be null. -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation1'; -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation1'; -- fail +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation1'; +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation1'; -- fail ERROR: duplicate key value violates unique constraint "dir_table2_pkey" DETAIL: Key (relative_path)=(nation1) already exists. -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation2'; -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -- fail +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation2'; +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -- fail ERROR: duplicate key value violates unique constraint "dir_table2_pkey" DETAIL: Key (relative_path)=(nation3) already exists. -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation2'; -- fail +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation2'; -- fail ERROR: duplicate key value violates unique constraint "dir_table2_pkey" DETAIL: Key (relative_path)=(nation3) already exists. -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation4' WITH TAG 'nation'; -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG 'nation2'; -- fail +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation4' WITH TAG 'nation'; +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG 'nation2'; -- fail ERROR: syntax error at or near "WITH" LINE 1: ...ress/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG '... 
^ @@ -1006,23 +1021,135 @@ SELECT md5_equal('dir_table2', 'nation4'); t (1 row) --- Does not support copy to +-- Test Copy To directory table \COPY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail -ERROR: cannot copy from non-table relation "dir_table1" +ERROR: COPY to directory table must specify the relative_path name. \COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail -ERROR: cannot copy from non-table relation "dir_table1" +ERROR: COPY to directory table must specify the relative_path name. COPY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail -ERROR: cannot copy from non-table relation "dir_table1" +ERROR: COPY to directory table must specify the relative_path name. COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail -ERROR: cannot copy from non-table relation "dir_table1" +ERROR: COPY to directory table must specify the relative_path name. \COPY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail -ERROR: cannot copy from non-table relation "dir_table2" +ERROR: COPY to directory table must specify the relative_path name. \COPY BINARY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail -ERROR: cannot copy from non-table relation "dir_table2" +ERROR: COPY to directory table must specify the relative_path name. COPY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail -ERROR: cannot copy from non-table relation "dir_table2" +ERROR: COPY to directory table must specify the relative_path name. COPY BINARY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail -ERROR: cannot copy from non-table relation "dir_table2" +ERROR: COPY to directory table must specify the relative_path name. +\COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail +ERROR: COPY to directory table must specify the relative_path name. +COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail +ERROR: COPY to directory table must specify the relative_path name. +\COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO '@abs_srcdir@/data/nation1'; -- OK +COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO '@abs_srcdir@/data/nation1'; -- OK +\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdin; -- OK +0|ALGERIA|0| haggle. carefully final deposits detect slyly agai +1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon +2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special +3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold +4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d +5|ETHIOPIA|0|ven packages wake quickly. regu +6|FRANCE|3|refully final requests. regular, ironi +7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco +8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun +9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull +10|IRAN|4|efully alongside of the slyly final dependencies. +11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula +12|JAPAN|2|ously. final, express gifts cajole a +13|JORDAN|4|ic deposits are blithely about the carefully regular pa +14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t +15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets? +16|MOZAMBIQUE|0|s. 
ironic, unusual asymptotes wake blithely r +17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun +18|CHINA|2|c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos +19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account +20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely +21|VIETNAM|2|hely enticingly express accounts. even, final +22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint +23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull +24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be +COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdin; -- OK +0|ALGERIA|0| haggle. carefully final deposits detect slyly agai +1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon +2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special +3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold +4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d +5|ETHIOPIA|0|ven packages wake quickly. regu +6|FRANCE|3|refully final requests. regular, ironi +7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco +8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun +9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull +10|IRAN|4|efully alongside of the slyly final dependencies. +11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula +12|JAPAN|2|ously. final, express gifts cajole a +13|JORDAN|4|ic deposits are blithely about the carefully regular pa +14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t +15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets? +16|MOZAMBIQUE|0|s. ironic, unusual asymptotes wake blithely r +17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun +18|CHINA|2|c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos +19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account +20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely +21|VIETNAM|2|hely enticingly express accounts. even, final +22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint +23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull +24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be +\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdout; -- OK +0|ALGERIA|0| haggle. carefully final deposits detect slyly agai +1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon +2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special +3|CANADA|1|eas hang ironic, silent packages. 
slyly regular packages are furiously over the tithes. fluffily bold +4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d +5|ETHIOPIA|0|ven packages wake quickly. regu +6|FRANCE|3|refully final requests. regular, ironi +7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco +8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun +9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull +10|IRAN|4|efully alongside of the slyly final dependencies. +11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula +12|JAPAN|2|ously. final, express gifts cajole a +13|JORDAN|4|ic deposits are blithely about the carefully regular pa +14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t +15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets? +16|MOZAMBIQUE|0|s. ironic, unusual asymptotes wake blithely r +17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun +18|CHINA|2|c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos +19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account +20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely +21|VIETNAM|2|hely enticingly express accounts. even, final +22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint +23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull +24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be +COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdout; -- OK +0|ALGERIA|0| haggle. carefully final deposits detect slyly agai +1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon +2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special +3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold +4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d +5|ETHIOPIA|0|ven packages wake quickly. regu +6|FRANCE|3|refully final requests. regular, ironi +7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco +8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun +9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull +10|IRAN|4|efully alongside of the slyly final dependencies. +11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula +12|JAPAN|2|ously. final, express gifts cajole a +13|JORDAN|4|ic deposits are blithely about the carefully regular pa +14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t +15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets? +16|MOZAMBIQUE|0|s. ironic, unusual asymptotes wake blithely r +17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun +18|CHINA|2|c dependencies. 
furiously express notornis sleep slyly regular accounts. ideas sleep. depos +19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account +20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely +21|VIETNAM|2|hely enticingly express accounts. even, final +22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint +23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull +24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be +\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM 'gzip -c -1 > @abs_srcdir@/data/nation2.gz'; -- OK +COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM 'gzip -c -1 > @abs_srcdir@/data/nation2.gz'; -- OK SELECT relative_path, size, tag FROM dir_table1 ORDER BY 1; relative_path | size | tag ---------------+------+-------- @@ -1046,8 +1173,8 @@ ANALYZE dir_table1; ANALYZE dir_table2; EXPLAIN (COSTS OFF) SELECT dir_table1.relative_path FROM dir_table1, dir_table2 WHERE dir_table1.relative_path = dir_table2.relative_path ORDER BY 1; - QUERY PLAN --------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------- Gather Motion 3:1 (slice1; segments: 3) Merge Key: dir_table1.relative_path -> Sort @@ -1057,7 +1184,7 @@ WHERE dir_table1.relative_path = dir_table2.relative_path ORDER BY 1; -> Seq Scan on dir_table1 -> Hash -> Seq Scan on dir_table2 - Optimizer: Postgres query optimizer + Optimizer: Pivotal Optimizer (GPORCA) (10 rows) SELECT dir_table1.relative_path FROM dir_table1, dir_table2 @@ -1072,8 +1199,8 @@ WHERE dir_table1.relative_path = dir_table2.relative_path ORDER BY 1; EXPLAIN (COSTS OFF) SELECT dir_table1.relative_path FROM dir_table1, dir_table2 WHERE dir_table1.size = dir_table2.size ORDER BY 1 LIMIT 1; - QUERY PLAN ------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------- Limit -> Gather Motion 3:1 (slice1; segments: 3) Merge Key: dir_table1.relative_path @@ -1089,7 +1216,7 @@ WHERE dir_table1.size = dir_table2.size ORDER BY 1 LIMIT 1; -> Redistribute Motion 3:3 (slice3; segments: 3) Hash Key: dir_table2.size -> Seq Scan on dir_table2 - Optimizer: Postgres query optimizer + Optimizer: Pivotal Optimizer (GPORCA) (16 rows) SELECT dir_table1.relative_path FROM dir_table1, dir_table2 @@ -1101,8 +1228,8 @@ WHERE dir_table1.size = dir_table2.size ORDER BY 1 LIMIT 1; EXPLAIN (COSTS OFF) SELECT dir_table1.relative_path FROM dir_table1, dir_table2 WHERE dir_table1.md5 = dir_table2.md5 ORDER BY 1 LIMIT 1; - QUERY PLAN ------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------ Limit -> Gather Motion 3:1 (slice1; segments: 3) Merge Key: dir_table1.relative_path @@ -1118,7 +1245,6 @@ WHERE dir_table1.md5 = dir_table2.md5 ORDER BY 1 LIMIT 1; -> Redistribute Motion 3:3 (slice3; segments: 3) Hash Key: dir_table2.md5 -> Seq Scan on dir_table2 - Optimizer: Postgres query optimizer (16 rows) SELECT dir_table1.relative_path FROM dir_table1, dir_table2 @@ -1130,8 +1256,8 @@ WHERE dir_table1.md5 = dir_table2.md5 ORDER BY 1 LIMIT 1; EXPLAIN (COSTS OFF) SELECT dir_table1.relative_path FROM 
dir_table1, dir_table2 WHERE dir_table1.tag = dir_table2.tag ORDER BY 1; - QUERY PLAN ---------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------ Gather Motion 3:1 (slice1; segments: 3) Merge Key: dir_table1.relative_path -> Sort @@ -1142,7 +1268,7 @@ WHERE dir_table1.tag = dir_table2.tag ORDER BY 1; -> Seq Scan on dir_table2 -> Hash -> Seq Scan on dir_table1 - Optimizer: Postgres query optimizer + Optimizer: Pivotal Optimizer (GPORCA) (11 rows) SELECT dir_table1.relative_path FROM dir_table1, dir_table2 @@ -1217,17 +1343,17 @@ ERROR: Only allow to update directory "tag" column. UPDATE dir_table2 SET md5 = '70f09140d1b83eb3ecf9a0e28494d2a4' WHERE relative_path = 'nation4'; -- fail ERROR: Only allow to update directory "tag" column. UPDATE dir_table1 SET tag = 'nation_new_tag'; -- ok -NOTICE: dir_table1 UPDATE BEFORE ROW (seg1 127.0.1.1:7003 pid=15072) -NOTICE: dir_table1 UPDATE BEFORE ROW (seg1 127.0.1.1:7003 pid=15072) -NOTICE: dir_table1 UPDATE BEFORE ROW (seg2 127.0.1.1:7004 pid=15073) -NOTICE: dir_table1 UPDATE AFTER ROW (seg1 127.0.1.1:7003 pid=15072) -NOTICE: dir_table1 UPDATE AFTER ROW (seg1 127.0.1.1:7003 pid=15072) -NOTICE: dir_table1 UPDATE BEFORE ROW (seg2 127.0.1.1:7004 pid=15073) -NOTICE: dir_table1 UPDATE AFTER ROW (seg2 127.0.1.1:7004 pid=15073) -NOTICE: dir_table1 UPDATE AFTER ROW (seg2 127.0.1.1:7004 pid=15073) +NOTICE: dir_table1 UPDATE BEFORE ROW (seg1 127.0.1.1:7003 pid=31193) +NOTICE: dir_table1 UPDATE BEFORE ROW (seg2 127.0.1.1:7004 pid=31192) +NOTICE: dir_table1 UPDATE BEFORE ROW (seg1 127.0.1.1:7003 pid=31193) +NOTICE: dir_table1 UPDATE BEFORE ROW (seg2 127.0.1.1:7004 pid=31192) +NOTICE: dir_table1 UPDATE AFTER ROW (seg1 127.0.1.1:7003 pid=31193) +NOTICE: dir_table1 UPDATE AFTER ROW (seg2 127.0.1.1:7004 pid=31192) +NOTICE: dir_table1 UPDATE AFTER ROW (seg1 127.0.1.1:7003 pid=31193) +NOTICE: dir_table1 UPDATE AFTER ROW (seg2 127.0.1.1:7004 pid=31192) UPDATE dir_table1 SET tag = 'nation2_new_tag' WHERE relative_path = 'nation2'; -- ok -NOTICE: dir_table1 UPDATE BEFORE ROW (seg2 127.0.1.1:7004 pid=15073) -NOTICE: dir_table1 UPDATE AFTER ROW (seg2 127.0.1.1:7004 pid=15073) +NOTICE: dir_table1 UPDATE BEFORE ROW (seg2 127.0.1.1:7004 pid=31192) +NOTICE: dir_table1 UPDATE AFTER ROW (seg2 127.0.1.1:7004 pid=31192) UPDATE dir_table2 SET tag = 'nation4_new_tag' WHERE relative_path = 'nation3'; -- ok UPDATE dir_table1 SET tag = 'failed_tag' WHERE relative_path = 'not_exist_path'; UPDATE dir_table2 SET tag = 'no_tag' WHERE relative_path = 'not_exist_path'; @@ -1759,7 +1885,7 @@ DROP DIRECTORY TABLE IF EXISTS dir_table4; DROP DIRECTORY TABLE IF EXISTS dir_table5; NOTICE: directory table "dir_table5" does not exist, skipping DROP DIRECTORY TABLE IF EXISTS dir_table6; -DROP FUNCTION IF EXISTS trigtest; +DROP FUNCTION IF EXISTS triggertest; DROP STORAGE USER MAPPING IF EXISTS FOR CURRENT_USER STORAGE SERVER oss_server1; DROP STORAGE USER MAPPING IF EXISTS FOR CURRENT_USER STORAGE SERVER oss_server2; NOTICE: storage user mapping for "gpadmin" does not exist for storage server "oss_server2", skipping diff --git a/src/test/regress/output/directory_table_optimizer.source b/src/test/regress/output/directory_table_optimizer.source index 61d7d42d08f..96f06814616 100644 --- a/src/test/regress/output/directory_table_optimizer.source +++ b/src/test/regress/output/directory_table_optimizer.source @@ -552,7 +552,7 @@ SELECT relname, relisshared, relpersistence, relkind FROM pg_class WHERE relname (12 
rows) \d+ dir_table1; - Directory able "public.dir_table1" + Directory table "public.dir_table1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------------+--------------------------+-----------+----------+---------+----------+--------------+------------- relative_path | text | | | | extended | | @@ -565,7 +565,7 @@ Indexes: Distributed by: (relative_path) \d+ dir_table2; - Directory able "public.dir_table2" + Directory table "public.dir_table2" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------------+--------------------------+-----------+----------+---------+----------+--------------+------------- relative_path | text | | | | extended | | @@ -578,7 +578,7 @@ Indexes: Distributed by: (relative_path) \d+ dir_table3; - Directory able "public.dir_table3" + Directory table "public.dir_table3" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------------+--------------------------+-----------+----------+---------+----------+--------------+------------- relative_path | text | | | | extended | | @@ -659,18 +659,13 @@ SELECT relname, relisshared, relpersistence, relkind FROM pg_class WHERE relname \c regression -- Test CREATE/DROP/REINDEX on DIRECTORY SCHEMA TABLE -- Test CREATE INDEX on DIRECTORY SCHEMA TABLE -CREATE INDEX dirtable1_relative_path_idx on dir_table1(relative_path); -- fail -ERROR: Disallowed to create index on directory table "dir_table1". (indexcmds.c:709) -CREATE INDEX dirtable1_size_idx on dir_table1(size); -- fail -ERROR: Disallowed to create index on directory table "dir_table1". (indexcmds.c:709) -CREATE INDEX dirtable1_last_modified_idx on dir_table1(last_modified); -- fail -ERROR: Disallowed to create index on directory table "dir_table1". (indexcmds.c:709) -CREATE INDEX dirtable1_md5_idx on dir_table1(md5); -- fail -ERROR: Disallowed to create index on directory table "dir_table1". (indexcmds.c:709) -CREATE INDEX dirtable1_tag_idx on dir_table1(tag); -- fail -ERROR: Disallowed to create index on directory table "dir_table1". (indexcmds.c:709) +CREATE INDEX dirtable1_relative_path_idx on dir_table1(relative_path); +CREATE INDEX dirtable1_size_idx on dir_table1(size); +CREATE INDEX dirtable1_last_modified_idx on dir_table1(last_modified); +CREATE INDEX dirtable1_md5_idx on dir_table1(md5); +CREATE INDEX dirtable1_tag_idx on dir_table1(tag); \d+ dir_table1; - Directory able "public.dir_table1" + Directory table "public.dir_table1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------------+--------------------------+-----------+----------+---------+----------+--------------+------------- relative_path | text | | | | extended | | @@ -680,21 +675,44 @@ ERROR: Disallowed to create index on directory table "dir_table1". 
(indexcmds.c tag | text | | | | extended | | Indexes: "dir_table1_pkey" PRIMARY KEY, btree (relative_path) + "dirtable1_last_modified_idx" btree (last_modified) + "dirtable1_md5_idx" btree (md5) + "dirtable1_relative_path_idx" btree (relative_path) + "dirtable1_size_idx" btree (size) + "dirtable1_tag_idx" btree (tag) Distributed by: (relative_path) -- Test DROP INDEX on DIRECTORY SCHEMA TABLE DROP INDEX dir_table1_pkey; -- fail -ERROR: Disallowed to drop index "dir_table1_pkey" on directory table "dir_table1" +ERROR: Disallowed to drop primary index "dir_table1_pkey" on directory table "dir_table1" DROP INDEX dir_table2_pkey; -- fail -ERROR: Disallowed to drop index "dir_table2_pkey" on directory table "dir_table2" +ERROR: Disallowed to drop primary index "dir_table2_pkey" on directory table "dir_table2" DROP INDEX dir_table3_pkey; -- fail -ERROR: Disallowed to drop index "dir_table3_pkey" on directory table "dir_table3" +ERROR: Disallowed to drop primary index "dir_table3_pkey" on directory table "dir_table3" DROP INDEX dir_table4_pkey; -- fail ERROR: index "dir_table4_pkey" does not exist DROP INDEX dir_table5_pkey; -- fail ERROR: index "dir_table5_pkey" does not exist DROP INDEX dir_table6_pkey; -- fail -ERROR: Disallowed to drop index "dir_table6_pkey" on directory table "dir_table6" +ERROR: Disallowed to drop primary index "dir_table6_pkey" on directory table "dir_table6" +DROP INDEX dirtable1_relative_path_idx; -- OK +DROP INDEX dirtable1_size_idx; -- OK +DROP INDEX dirtable1_last_modified_idx; -- OK +DROP INDEX dirtable1_md5_idx; -- OK +DROP INDEX dirtable1_tag_idx; -- OK +\d+ dir_table1; + Directory table "public.dir_table1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +---------------+--------------------------+-----------+----------+---------+----------+--------------+------------- + relative_path | text | | | | extended | | + size | bigint | | | | plain | | + last_modified | timestamp with time zone | | | | plain | | + md5 | text | | | | extended | | + tag | text | | | | extended | | +Indexes: + "dir_table1_pkey" PRIMARY KEY, btree (relative_path) +Distributed by: (relative_path) + -- Test REINDEX on DIRECTORY SCHEMA TABLE REINDEX INDEX dir_table1_pkey; REINDEX INDEX dir_table2_pkey; @@ -713,21 +731,20 @@ REINDEX TABLE dir_table5; ERROR: relation "dir_table5" does not exist REINDEX TABLE dir_table6; -- Test triggers -DROP FUNCTION IF EXISTS trigtest; -create function trigtest() returns trigger as $$ +create function triggertest() returns trigger as $$ begin raise notice '% % % %', TG_TABLE_NAME, TG_OP, TG_WHEN, TG_LEVEL; return new; end;$$ language plpgsql; create trigger trigtest_b_row_tg_dirtable_1 before insert or update or delete on dir_table1 -for each row execute procedure trigtest(); +for each row execute procedure triggertest(); create trigger trigtest_a_row_tg_dirtable_1 after insert or update or delete on dir_table1 -for each row execute procedure trigtest(); +for each row execute procedure triggertest(); create trigger trigtest_b_stmt_tg_dirtable_1 before insert or update or delete on dir_table1 -for each statement execute procedure trigtest(); +for each statement execute procedure triggertest(); ERROR: Triggers for statements are not yet supported create trigger trigtest_a_stmt_tg_dirtable_1 after insert or update or delete on dir_table1 -for each statement execute procedure trigtest(); +for each statement execute procedure triggertest(); ERROR: Triggers for statements are not yet supported -- Test COPY DIRECTORY TABLE 
syntax SELECT relative_path, size, tag FROM dir_table1 ORDER BY 1; @@ -747,7 +764,7 @@ ERROR: Only support copy binary from directory table. \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv'; -- fail ERROR: Copy from directory table file name can't be null. \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation1'; -NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=31465) +NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=31193) \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation1'; -- fail ERROR: duplicate key value violates unique constraint "dir_table1_pkey" DETAIL: Key (relative_path)=(nation1) already exists. @@ -756,9 +773,9 @@ ERROR: syntax error at or near "'nation2'" LINE 1: COPY BINARY dir_table1 FROM STDIN 'nation2' 'nation2'; -- fa... ^ \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation2'; -NOTICE: dir_table1 INSERT AFTER ROW (seg2 127.0.1.1:7004 pid=31466) +NOTICE: dir_table1 INSERT AFTER ROW (seg2 127.0.1.1:7004 pid=31192) \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=31465) +NOTICE: dir_table1 INSERT AFTER ROW (seg1 127.0.1.1:7003 pid=31193) \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -- fail ERROR: duplicate key value violates unique constraint "dir_table1_pkey" DETAIL: Key (relative_path)=(nation3) already exists. @@ -766,7 +783,7 @@ DETAIL: Key (relative_path)=(nation3) already exists. ERROR: duplicate key value violates unique constraint "dir_table1_pkey" DETAIL: Key (relative_path)=(nation3) already exists. \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation4' WITH TAG 'nation'; -NOTICE: dir_table1 INSERT AFTER ROW (seg2 127.0.1.1:7004 pid=31466) +NOTICE: dir_table1 INSERT AFTER ROW (seg2 127.0.1.1:7004 pid=31192) \COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG 'nation2'; -- fail ERROR: syntax error at or near "WITH" LINE 1: ...dir_table1 FROM STDIN 'nation5' WITH TAG 'nation' WITH TAG '... 
@@ -789,26 +806,24 @@ SELECT relative_path, content FROM directory_table('dir_table1') ORDER BY 1; nation4 | \x307c414c47455249417c307c20686167676c652e206361726566756c6c792066696e616c206465706f736974732064657465637420736c796c7920616761690a317c415247454e54494e417c317c616c20666f7865732070726f6d69736520736c796c79206163636f7264696e6720746f2074686520726567756c6172206163636f756e74732e20626f6c6420726571756573747320616c6f6e0a327c4252415a494c7c317c7920616c6f6e6773696465206f66207468652070656e64696e67206465706f736974732e206361726566756c6c79207370656369616c207061636b61676573206172652061626f7574207468652069726f6e696320666f726765732e20736c796c79207370656369616c200a337c43414e4144417c317c6561732068616e672069726f6e69632c2073696c656e74207061636b616765732e20736c796c7920726567756c6172207061636b616765732061726520667572696f75736c79206f76657220746865207469746865732e20666c756666696c7920626f6c640a347c45475950547c347c792061626f766520746865206361726566756c6c7920756e757375616c207468656f646f6c697465732e2066696e616c206475676f7574732061726520717569636b6c79206163726f73732074686520667572696f75736c7920726567756c617220640a357c455448494f5049417c307c76656e207061636b616765732077616b6520717569636b6c792e20726567750a367c4652414e43457c337c726566756c6c792066696e616c2072657175657374732e20726567756c61722c2069726f6e690a377c4745524d414e597c337c6c20706c6174656c6574732e20726567756c6172206163636f756e747320782d7261793a20756e757375616c2c20726567756c6172206163636f0a387c494e4449417c327c737320657863757365732063616a6f6c6520736c796c79206163726f737320746865207061636b616765732e206465706f73697473207072696e742061726f756e0a397c494e444f4e455349417c327c20736c796c792065787072657373206173796d70746f7465732e20726567756c6172206465706f7369747320686167676c6520736c796c792e206361726566756c6c792069726f6e696320686f636b657920706c617965727320736c65657020626c697468656c792e206361726566756c6c0a31307c4952414e7c347c6566756c6c7920616c6f6e6773696465206f662074686520736c796c792066696e616c20646570656e64656e636965732e200a31317c495241517c347c6e6963206465706f7369747320626f6f73742061746f702074686520717569636b6c792066696e616c2072657175657374733f20717569636b6c7920726567756c610a31327c4a4150414e7c327c6f75736c792e2066696e616c2c20657870726573732067696674732063616a6f6c6520610a31337c4a4f5244414e7c347c6963206465706f736974732061726520626c697468656c792061626f757420746865206361726566756c6c7920726567756c61722070610a31347c4b454e59417c307c2070656e64696e67206578637573657320686167676c6520667572696f75736c79206465706f736974732e2070656e64696e672c20657870726573732070696e746f206265616e732077616b6520666c756666696c79207061737420740a31357c4d4f524f43434f7c307c726e732e20626c697468656c7920626f6c6420636f7572747320616d6f6e672074686520636c6f73656c7920726567756c6172207061636b616765732075736520667572696f75736c7920626f6c6420706c6174656c6574733f0a31367c4d4f5a414d42495155457c307c732e2069726f6e69632c20756e757375616c206173796d70746f7465732077616b6520626c697468656c7920720a31377c504552557c317c706c6174656c6574732e20626c697468656c792070656e64696e6720646570656e64656e636965732075736520666c756666696c79206163726f737320746865206576656e2070696e746f206265616e732e206361726566756c6c792073696c656e74206163636f756e0a31387c4348494e417c327c6320646570656e64656e636965732e20667572696f75736c792065787072657373206e6f746f726e697320736c65657020736c796c7920726567756c6172206163636f756e74732e20696465617320736c6565702e206465706f730a31397c524f4d414e49417c337c756c6172206173796d70746f746573206172652061626f75742074686520667572696f7573206d756c7469706c696572732e206578707265737320646570656e64656e63696573206e61672061626f7665207468652069726f6e6963616c6c79
2069726f6e6963206163636f756e740a32307c5341554449204152414249417c347c74732e2073696c656e7420726571756573747320686167676c652e20636c6f73656c792065787072657373207061636b6167657320736c656570206163726f73732074686520626c697468656c790a32317c564945544e414d7c327c68656c7920656e746963696e676c792065787072657373206163636f756e74732e206576656e2c2066696e616c200a32327c5255535349417c337c20726571756573747320616761696e73742074686520706c6174656c65747320757365206e65766572206163636f7264696e6720746f2074686520717569636b6c7920726567756c61722070696e740a32337c554e49544544204b494e47444f4d7c337c65616e7320626f6f7374206361726566756c6c79207370656369616c2072657175657374732e206163636f756e7473206172652e206361726566756c6c0a32347c554e49544544205354415445537c317c792066696e616c207061636b616765732e20736c6f7720666f7865732063616a6f6c6520717569636b6c792e20717569636b6c792073696c656e7420706c6174656c657473206272656163682069726f6e6963206163636f756e74732e20756e757375616c2070696e746f2062650a (4 rows) -COPY dir_table2 FROM '@abs_srcdir@/data/nation.csv'; -- fail +COPY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv'; -- fail ERROR: Copy from directory table file name can't be null. -COPY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation'; -- fail +COPY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation'; -- fail ERROR: Only support copy binary from directory table. -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv'; -- fail -ERROR: Copy from directory table file name can't be null. -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation1'; -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation1'; -- fail +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation1'; +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation1'; -- fail ERROR: duplicate key value violates unique constraint "dir_table2_pkey" DETAIL: Key (relative_path)=(nation1) already exists. -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation2'; -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -- fail +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation2'; +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation'; -- fail ERROR: duplicate key value violates unique constraint "dir_table2_pkey" DETAIL: Key (relative_path)=(nation3) already exists. -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation2'; -- fail +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation2'; -- fail ERROR: duplicate key value violates unique constraint "dir_table2_pkey" DETAIL: Key (relative_path)=(nation3) already exists. -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation4' WITH TAG 'nation'; -COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG 'nation2'; -- fail +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation4' WITH TAG 'nation'; +COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG 'nation2'; -- fail ERROR: syntax error at or near "WITH" LINE 1: ...ress/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG '... 
^ @@ -1006,23 +1021,135 @@ SELECT md5_equal('dir_table2', 'nation4'); t (1 row) --- Does not support copy to +-- Test Copy To directory table \COPY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail -ERROR: cannot copy from non-table relation "dir_table1" +ERROR: COPY to directory table must specify the relative_path name. \COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail -ERROR: cannot copy from non-table relation "dir_table1" +ERROR: COPY to directory table must specify the relative_path name. COPY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail -ERROR: cannot copy from non-table relation "dir_table1" +ERROR: COPY to directory table must specify the relative_path name. COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail -ERROR: cannot copy from non-table relation "dir_table1" +ERROR: COPY to directory table must specify the relative_path name. \COPY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail -ERROR: cannot copy from non-table relation "dir_table2" +ERROR: COPY to directory table must specify the relative_path name. \COPY BINARY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail -ERROR: cannot copy from non-table relation "dir_table2" +ERROR: COPY to directory table must specify the relative_path name. COPY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail -ERROR: cannot copy from non-table relation "dir_table2" +ERROR: COPY to directory table must specify the relative_path name. COPY BINARY dir_table2 TO '@abs_srcdir@/data/dir_table2'; -- fail -ERROR: cannot copy from non-table relation "dir_table2" +ERROR: COPY to directory table must specify the relative_path name. +\COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail +ERROR: COPY to directory table must specify the relative_path name. +COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1'; -- fail +ERROR: COPY to directory table must specify the relative_path name. +\COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO '@abs_srcdir@/data/nation1'; -- OK +COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO '@abs_srcdir@/data/nation1'; -- OK +\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdin; -- OK +0|ALGERIA|0| haggle. carefully final deposits detect slyly agai +1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon +2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special +3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold +4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d +5|ETHIOPIA|0|ven packages wake quickly. regu +6|FRANCE|3|refully final requests. regular, ironi +7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco +8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun +9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull +10|IRAN|4|efully alongside of the slyly final dependencies. +11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula +12|JAPAN|2|ously. final, express gifts cajole a +13|JORDAN|4|ic deposits are blithely about the carefully regular pa +14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t +15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets? +16|MOZAMBIQUE|0|s. 
ironic, unusual asymptotes wake blithely r +17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun +18|CHINA|2|c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos +19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account +20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely +21|VIETNAM|2|hely enticingly express accounts. even, final +22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint +23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull +24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be +COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdin; -- OK +0|ALGERIA|0| haggle. carefully final deposits detect slyly agai +1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon +2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special +3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold +4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d +5|ETHIOPIA|0|ven packages wake quickly. regu +6|FRANCE|3|refully final requests. regular, ironi +7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco +8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun +9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull +10|IRAN|4|efully alongside of the slyly final dependencies. +11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula +12|JAPAN|2|ously. final, express gifts cajole a +13|JORDAN|4|ic deposits are blithely about the carefully regular pa +14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t +15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets? +16|MOZAMBIQUE|0|s. ironic, unusual asymptotes wake blithely r +17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun +18|CHINA|2|c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos +19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account +20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely +21|VIETNAM|2|hely enticingly express accounts. even, final +22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint +23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull +24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be +\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdout; -- OK +0|ALGERIA|0| haggle. carefully final deposits detect slyly agai +1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon +2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special +3|CANADA|1|eas hang ironic, silent packages. 
slyly regular packages are furiously over the tithes. fluffily bold +4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d +5|ETHIOPIA|0|ven packages wake quickly. regu +6|FRANCE|3|refully final requests. regular, ironi +7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco +8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun +9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull +10|IRAN|4|efully alongside of the slyly final dependencies. +11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula +12|JAPAN|2|ously. final, express gifts cajole a +13|JORDAN|4|ic deposits are blithely about the carefully regular pa +14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t +15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets? +16|MOZAMBIQUE|0|s. ironic, unusual asymptotes wake blithely r +17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun +18|CHINA|2|c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos +19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account +20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely +21|VIETNAM|2|hely enticingly express accounts. even, final +22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint +23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull +24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be +COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdout; -- OK +0|ALGERIA|0| haggle. carefully final deposits detect slyly agai +1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon +2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special +3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold +4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d +5|ETHIOPIA|0|ven packages wake quickly. regu +6|FRANCE|3|refully final requests. regular, ironi +7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco +8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun +9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull +10|IRAN|4|efully alongside of the slyly final dependencies. +11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula +12|JAPAN|2|ously. final, express gifts cajole a +13|JORDAN|4|ic deposits are blithely about the carefully regular pa +14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t +15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets? +16|MOZAMBIQUE|0|s. ironic, unusual asymptotes wake blithely r +17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun +18|CHINA|2|c dependencies. 
furiously express notornis sleep slyly regular accounts. ideas sleep. depos +19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account +20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely +21|VIETNAM|2|hely enticingly express accounts. even, final +22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint +23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull +24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be +\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM 'gzip -c -1 > @abs_srcdir@/data/nation2.gz'; -- OK +COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM 'gzip -c -1 > @abs_srcdir@/data/nation2.gz'; -- OK SELECT relative_path, size, tag FROM dir_table1 ORDER BY 1; relative_path | size | tag ---------------+------+-------- @@ -1212,17 +1339,17 @@ ERROR: Only allow to update directory "tag" column. UPDATE dir_table2 SET md5 = '70f09140d1b83eb3ecf9a0e28494d2a4' WHERE relative_path = 'nation4'; -- fail ERROR: Only allow to update directory "tag" column. UPDATE dir_table1 SET tag = 'nation_new_tag'; -- ok -NOTICE: dir_table1 UPDATE BEFORE ROW (seg1 127.0.1.1:7003 pid=31465) -NOTICE: dir_table1 UPDATE BEFORE ROW (seg1 127.0.1.1:7003 pid=31465) -NOTICE: dir_table1 UPDATE BEFORE ROW (seg2 127.0.1.1:7004 pid=31466) -NOTICE: dir_table1 UPDATE BEFORE ROW (seg2 127.0.1.1:7004 pid=31466) -NOTICE: dir_table1 UPDATE AFTER ROW (seg2 127.0.1.1:7004 pid=31466) -NOTICE: dir_table1 UPDATE AFTER ROW (seg1 127.0.1.1:7003 pid=31465) -NOTICE: dir_table1 UPDATE AFTER ROW (seg1 127.0.1.1:7003 pid=31465) -NOTICE: dir_table1 UPDATE AFTER ROW (seg2 127.0.1.1:7004 pid=31466) +NOTICE: dir_table1 UPDATE BEFORE ROW (seg1 127.0.1.1:7003 pid=31193) +NOTICE: dir_table1 UPDATE BEFORE ROW (seg2 127.0.1.1:7004 pid=31192) +NOTICE: dir_table1 UPDATE BEFORE ROW (seg1 127.0.1.1:7003 pid=31193) +NOTICE: dir_table1 UPDATE BEFORE ROW (seg2 127.0.1.1:7004 pid=31192) +NOTICE: dir_table1 UPDATE AFTER ROW (seg1 127.0.1.1:7003 pid=31193) +NOTICE: dir_table1 UPDATE AFTER ROW (seg2 127.0.1.1:7004 pid=31192) +NOTICE: dir_table1 UPDATE AFTER ROW (seg1 127.0.1.1:7003 pid=31193) +NOTICE: dir_table1 UPDATE AFTER ROW (seg2 127.0.1.1:7004 pid=31192) UPDATE dir_table1 SET tag = 'nation2_new_tag' WHERE relative_path = 'nation2'; -- ok -NOTICE: dir_table1 UPDATE BEFORE ROW (seg2 127.0.1.1:7004 pid=31466) -NOTICE: dir_table1 UPDATE AFTER ROW (seg2 127.0.1.1:7004 pid=31466) +NOTICE: dir_table1 UPDATE BEFORE ROW (seg2 127.0.1.1:7004 pid=31192) +NOTICE: dir_table1 UPDATE AFTER ROW (seg2 127.0.1.1:7004 pid=31192) UPDATE dir_table2 SET tag = 'nation4_new_tag' WHERE relative_path = 'nation3'; -- ok UPDATE dir_table1 SET tag = 'failed_tag' WHERE relative_path = 'not_exist_path'; UPDATE dir_table2 SET tag = 'no_tag' WHERE relative_path = 'not_exist_path'; @@ -1754,7 +1881,7 @@ DROP DIRECTORY TABLE IF EXISTS dir_table4; DROP DIRECTORY TABLE IF EXISTS dir_table5; NOTICE: directory table "dir_table5" does not exist, skipping DROP DIRECTORY TABLE IF EXISTS dir_table6; -DROP FUNCTION IF EXISTS trigtest; +DROP FUNCTION IF EXISTS triggertest; DROP STORAGE USER MAPPING IF EXISTS FOR CURRENT_USER STORAGE SERVER oss_server1; DROP STORAGE USER MAPPING IF EXISTS FOR CURRENT_USER STORAGE SERVER oss_server2; NOTICE: storage user mapping for "gpadmin" does not exist for storage server "oss_server2", 
skipping
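
Below is a minimal usage sketch of the directory-table COPY grammar exercised by the regression output above. It is illustrative only and not part of the patch: the table name demo_dir, the tag names, and all file paths are hypothetical placeholders, and the statements assume a server built with this change where demo_dir already exists and has a file registered under the relative path 'data1'.

-- Load a file into the directory table, registering it under a relative path with an optional tag.
COPY BINARY demo_dir FROM '/tmp/source.csv' 'data1' WITH TAG 'demo';

-- The same load, but fed from an external command instead of a plain file.
COPY BINARY demo_dir FROM PROGRAM 'cat /tmp/source.csv' 'data2' WITH TAG 'demo';

-- Export a registered file back out: to a server-side file, to the client, or through a program.
COPY BINARY DIRECTORY TABLE demo_dir 'data1' TO '/tmp/exported.csv';
COPY BINARY DIRECTORY TABLE demo_dir 'data1' TO STDOUT;
COPY BINARY DIRECTORY TABLE demo_dir 'data1' TO PROGRAM 'gzip -c -1 > /tmp/exported.csv.gz';

-- Secondary indexes on directory-table columns are now allowed; only the primary key index cannot be dropped.
CREATE INDEX demo_dir_tag_idx ON demo_dir (tag);
DROP INDEX demo_dir_tag_idx;

-- Metadata queries and the tag-only UPDATE path shown in the tests above.
SELECT relative_path, size, tag FROM demo_dir ORDER BY 1;
SELECT relative_path, content FROM directory_table('demo_dir') ORDER BY 1;
UPDATE demo_dir SET tag = 'demo_new' WHERE relative_path = 'data1';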