This page collects typical usage examples of the pg_malloc function, taken from PostgreSQL and PostgreSQL-derived C code bases. If you are unsure how pg_malloc is used in practice, the selected examples below may help.
The section below presents 20 code examples of pg_malloc, sorted roughly by popularity.
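For orientation, pg_malloc itself is PostgreSQL's error-checked malloc wrapper for frontend (client-side) programs. The sketch below is modeled on the frontend implementation in PostgreSQL's src/common/fe_memutils.c; exact details differ between versions, and some older tools (such as the pg_upgrade code in Example 10) use a variant that takes a context argument, so treat this as illustrative only.
#include <stdio.h>
#include <stdlib.h>
/* Illustrative sketch of pg_malloc: allocate or exit. */
void *
pg_malloc(size_t size)
{
	void	   *tmp;
	/* avoid unportable behavior of malloc(0) */
	if (size == 0)
		size = 1;
	tmp = malloc(size);
	if (tmp == NULL)
	{
		fprintf(stderr, "out of memory\n");
		exit(EXIT_FAILURE);
	}
	return tmp;
}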
Example 1: sql_exec
/*
* Actual code to make call to the database and print the output data.
*/
int
sql_exec(PGconn *conn, const char *todo, bool quiet)
{
PGresult *res;
int nfields;
int nrows;
int i,
j,
l;
int *length;
char *pad;
/* make the call */
res = PQexec(conn, todo);
/* check and deal with errors */
if (!res || PQresultStatus(res) > 2)
{
fprintf(stderr, "oid2name: query failed: %s\n", PQerrorMessage(conn));
fprintf(stderr, "oid2name: query was: %s\n", todo);
PQclear(res);
PQfinish(conn);
exit(-1);
}
/* get the number of fields */
nrows = PQntuples(res);
nfields = PQnfields(res);
/* for each field, get the needed width */
length = (int *) pg_malloc(sizeof(int) * nfields);
for (j = 0; j < nfields; j++)
length[j] = strlen(PQfname(res, j));
for (i = 0; i < nrows; i++)
{
for (j = 0; j < nfields; j++)
{
l = strlen(PQgetvalue(res, i, j));
if (l > length[j])
length[j] = strlen(PQgetvalue(res, i, j));
}
}
/* print a header */
if (!quiet)
{
for (j = 0, l = 0; j < nfields; j++)
{
fprintf(stdout, "%*s", length[j] + 2, PQfname(res, j));
l += length[j] + 2;
}
fprintf(stdout, "\n");
pad = (char *) pg_malloc(l + 1);
MemSet(pad, '-', l);
pad[l] = '\0';
fprintf(stdout, "%s\n", pad);
free(pad);
}
/* for each row, dump the information */
for (i = 0; i < nrows; i++)
{
for (j = 0; j < nfields; j++)
fprintf(stdout, "%*s", length[j] + 2, PQgetvalue(res, i, j));
fprintf(stdout, "\n");
}
/* cleanup */
PQclear(res);
free(length);
return 0;
}
Developer: adam8157, Project: gpdb, Lines: 79, Source file: oid2name.c
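A minimal, hypothetical driver for the sql_exec() function above; the connection string and the query are illustrative and not taken from oid2name itself.
#include <stdbool.h>
#include <stdio.h>
#include "libpq-fe.h"
int sql_exec(PGconn *conn, const char *todo, bool quiet);	/* from the example above */
int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* illustrative connection string */
	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return 1;
	}
	/* quiet = false, so the column header and separator line are printed too */
	sql_exec(conn, "SELECT datname FROM pg_database ORDER BY 1", false);
	PQfinish(conn);
	return 0;
}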
Example 2: copy_file
static int
copy_file(const char *srcfile, const char *dstfile, bool force)
{
#define COPY_BUF_SIZE (50 * BLCKSZ)
int src_fd;
int dest_fd;
char *buffer;
int ret = 0;
int save_errno = 0;
if ((srcfile == NULL) || (dstfile == NULL))
{
errno = EINVAL;
return -1;
}
if ((src_fd = open(srcfile, O_RDONLY, 0)) < 0)
return -1;
if ((dest_fd = open(dstfile, O_RDWR | O_CREAT | (force ? 0 : O_EXCL), S_IRUSR | S_IWUSR)) < 0)
{
save_errno = errno;
if (src_fd != 0)
close(src_fd);
errno = save_errno;
return -1;
}
buffer = (char *) pg_malloc(COPY_BUF_SIZE);
/* perform the data copy: read from the source file, write to the destination */
while (true)
{
ssize_t nbytes = read(src_fd, buffer, COPY_BUF_SIZE);
if (nbytes < 0)
{
save_errno = errno;
ret = -1;
break;
}
if (nbytes == 0)
break;
errno = 0;
if (write(dest_fd, buffer, nbytes) != nbytes)
{
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
save_errno = errno;
ret = -1;
break;
}
}
pg_free(buffer);
if (src_fd != 0)
close(src_fd);
if (dest_fd != 0)
close(dest_fd);
if (save_errno != 0)
errno = save_errno;
return ret;
}
Developer: jtrutna, Project: postgres, Lines: 74, Source file: file.c
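A hypothetical call site for copy_file(); the paths are invented for illustration. With force set to false, the O_EXCL flag makes the call fail with EEXIST rather than overwrite an existing destination file.
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
static int copy_file(const char *srcfile, const char *dstfile, bool force);	/* from the example above */
static void
copy_example(void)
{
	/* hypothetical paths, for illustration only */
	if (copy_file("base/1/1234", "backup/1234.copy", false) < 0)
		fprintf(stderr, "could not copy file: %s\n", strerror(errno));
}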
Example 3: get_loadable_libraries
/*
* get_loadable_libraries()
*
* Fetch the names of all old libraries containing C-language functions.
* We will later check that they all exist in the new installation.
*/
void
get_loadable_libraries(void)
{
PGresult **ress;
int totaltups;
int dbnum;
bool found_public_plpython_handler = false;
char *pg83_str;
ress = (PGresult **) pg_malloc(old_cluster.dbarr.ndbs * sizeof(PGresult *));
totaltups = 0;
/*
* gpoptutils was removed during the 5.0 development cycle and the
* functionality is now in backend, skip when checking for loadable
* libraries in 4.3-> upgrades.
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) == 802)
pg83_str = "probin NOT IN ('$libdir/gpoptutils') AND ";
else
pg83_str = "";
/* Fetch all library names, removing duplicates within each DB */
for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
{
DbInfo *active_db = &old_cluster.dbarr.dbs[dbnum];
PGconn *conn = connectToServer(&old_cluster, active_db->db_name);
/*
* Fetch all libraries referenced in this DB. We can't exclude the
* "pg_catalog" schema because, while such functions are not
* explicitly dumped by pg_dump, they do reference implicit objects
* that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
*/
ress[dbnum] = executeQueryOrDie(conn,
"SELECT DISTINCT probin "
"FROM pg_catalog.pg_proc "
"WHERE prolang = 13 /* C */ AND "
"probin IS NOT NULL AND "
" %s "
"oid >= %u;",
pg83_str,
FirstNormalObjectId);
totaltups += PQntuples(ress[dbnum]);
/*
* Systems that install plpython before 8.1 have
* plpython_call_handler() defined in the "public" schema, causing
* pg_dumpall to dump it. However that function still references
* "plpython" (no "2"), so it throws an error on restore. This code
* checks for the problem function, reports affected databases to the
* user and explains how to remove them. 8.1 git commit:
* e0dedd0559f005d60c69c9772163e69c204bac69
* http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php
* http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) < 901)
{
PGresult *res;
res = executeQueryOrDie(conn,
"SELECT 1 "
"FROM pg_catalog.pg_proc JOIN pg_namespace "
" ON pronamespace = pg_namespace.oid "
"WHERE proname = 'plpython_call_handler' AND "
"nspname = 'public' AND "
"prolang = 13 /* C */ AND "
"probin = '$libdir/plpython' AND "
"pg_proc.oid >= %u;",
FirstNormalObjectId);
if (PQntuples(res) > 0)
{
if (!found_public_plpython_handler)
{
pg_log(PG_WARNING,
"\nThe old cluster has a \"plpython_call_handler\" function defined\n"
"in the \"public\" schema which is a duplicate of the one defined\n"
"in the \"pg_catalog\" schema. You can confirm this by executing\n"
"in psql:\n"
"\n"
" \\df *.plpython_call_handler\n"
"\n"
"The \"public\" schema version of this function was created by a\n"
"pre-8.1 install of plpython, and must be removed for pg_upgrade\n"
"to complete because it references a now-obsolete \"plpython\"\n"
"shared object file. You can remove the \"public\" schema version\n"
"of this function by running the following command:\n"
"\n"
" DROP FUNCTION public.plpython_call_handler()\n"
"\n"
"in each affected database:\n"
"\n");
}
pg_log(PG_WARNING, " %s\n", active_db->db_name);
//......... part of the code omitted here .........
Developer: d, Project: gpdb, Lines: 101, Source file: function.c
Example 4: parallel_transfer_all_new_dbs
/*
* parallel_transfer_all_new_dbs
*
* This has the same API as transfer_all_new_dbs, except it does parallel execution
* by transferring multiple tablespaces in parallel
*/
void
parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
char *old_pgdata, char *new_pgdata,
char *old_tablespace)
{
#ifndef WIN32
pid_t child;
#else
HANDLE child;
transfer_thread_arg *new_arg;
#endif
if (user_opts.jobs <= 1)
/* throw_error must be true to allow jobs */
transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL);
else
{
/* parallel */
#ifdef WIN32
if (thread_handles == NULL)
thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));
if (transfer_thread_args == NULL)
{
int i;
transfer_thread_args = pg_malloc(user_opts.jobs * sizeof(transfer_thread_arg *));
/*
* For safety and performance, we keep the args allocated during
* the entire life of the process, and we don't free the args in a
* thread different from the one that allocated it.
*/
for (i = 0; i < user_opts.jobs; i++)
transfer_thread_args[i] = pg_malloc0(sizeof(transfer_thread_arg));
}
cur_thread_args = (void **) transfer_thread_args;
#endif
/* harvest any dead children */
while (reap_child(false) == true)
;
/* must we wait for a dead child? */
if (parallel_jobs >= user_opts.jobs)
reap_child(true);
/* set this before we start the job */
parallel_jobs++;
/* Ensure stdio state is quiesced before forking */
fflush(NULL);
#ifndef WIN32
child = fork();
if (child == 0)
{
transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata,
old_tablespace);
/* if we take another exit path, it will be non-zero */
/* use _exit to skip atexit() functions */
_exit(0);
}
else if (child < 0)
/* fork failed */
pg_fatal("could not create worker process: %s\n", strerror(errno));
#else
/* empty array elements are always at the end */
new_arg = transfer_thread_args[parallel_jobs - 1];
/* Can only pass one pointer into the function, so use a struct */
new_arg->old_db_arr = old_db_arr;
new_arg->new_db_arr = new_db_arr;
if (new_arg->old_pgdata)
pg_free(new_arg->old_pgdata);
new_arg->old_pgdata = pg_strdup(old_pgdata);
if (new_arg->new_pgdata)
pg_free(new_arg->new_pgdata);
new_arg->new_pgdata = pg_strdup(new_pgdata);
if (new_arg->old_tablespace)
pg_free(new_arg->old_tablespace);
new_arg->old_tablespace = old_tablespace ? pg_strdup(old_tablespace) : NULL;
child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_transfer_all_new_dbs,
new_arg, 0, NULL);
if (child == 0)
pg_fatal("could not create worker thread: %s\n", strerror(errno));
thread_handles[parallel_jobs - 1] = child;
#endif
}
return;
}
Developer: ArgenBarbie, Project: postgresql-9.5.0, Lines: 100, Source file: parallel.c
Example 5: ReadControlFile
/*
* Try to read the existing pg_control file.
*
* This routine is also responsible for updating old pg_control versions
* to the current format. (Currently we don't do anything of the sort.)
*/
static bool
ReadControlFile(void)
{
int fd;
int len;
char *buffer;
pg_crc32 crc;
if ((fd = open(XLOG_CONTROL_FILE, O_RDONLY | PG_BINARY, 0)) < 0)
{
/*
* If pg_control is not there at all, or we can't read it, the odds
* are we've been handed a bad DataDir path, so give up. User can do
* "touch pg_control" to force us to proceed.
*/
fprintf(stderr, _("%s: could not open file \"%s\" for reading: %s\n"),
progname, XLOG_CONTROL_FILE, strerror(errno));
if (errno == ENOENT)
fprintf(stderr, _("If you are sure the data directory path is correct, execute\n"
" touch %s\n"
"and try again.\n"),
XLOG_CONTROL_FILE);
exit(1);
}
/* Use malloc to ensure we have a maxaligned buffer */
buffer = (char *) pg_malloc(PG_CONTROL_SIZE);
len = read(fd, buffer, PG_CONTROL_SIZE);
if (len < 0)
{
fprintf(stderr, _("%s: could not read file \"%s\": %s\n"),
progname, XLOG_CONTROL_FILE, strerror(errno));
exit(1);
}
close(fd);
if (len >= sizeof(ControlFileData) &&
((ControlFileData *) buffer)->pg_control_version == PG_CONTROL_VERSION)
{
/* Check the CRC. */
INIT_CRC32(crc);
COMP_CRC32(crc,
buffer,
offsetof(ControlFileData, crc));
FIN_CRC32(crc);
if (EQ_CRC32(crc, ((ControlFileData *) buffer)->crc))
{
/* Valid data... */
memcpy(&ControlFile, buffer, sizeof(ControlFile));
return true;
}
fprintf(stderr, _("%s: pg_control exists but has invalid CRC; proceed with caution\n"),
progname);
/* We will use the data anyway, but treat it as guessed. */
memcpy(&ControlFile, buffer, sizeof(ControlFile));
guessed = true;
return true;
}
/* Looks like it's a mess. */
fprintf(stderr, _("%s: pg_control exists but is broken or unknown version; ignoring it\n"),
progname);
return false;
}
Developer: TarasGit, Project: pgsql, Lines: 73, Source file: pg_resetxlog.c
Example 6: strtokx
/*
* Replacement for strtok() (a.k.a. poor man's flex)
*
* Splits a string into tokens, returning one token per call, then NULL
* when no more tokens exist in the given string.
*
* The calling convention is similar to that of strtok, but with more
* frammishes.
*
* s - string to parse, if NULL continue parsing the last string
* whitespace - set of whitespace characters that separate tokens
* delim - set of non-whitespace separator characters (or NULL)
* quote - set of characters that can quote a token (NULL if none)
* escape - character that can quote quotes (0 if none)
* e_strings - if TRUE, treat E'...' syntax as a valid token
* del_quotes - if TRUE, strip quotes from the returned token, else return
* it exactly as found in the string
* encoding - the active character-set encoding
*
* Characters in 'delim', if any, will be returned as single-character
* tokens unless part of a quoted token.
*
* Double occurrences of the quoting character are always taken to represent
* a single quote character in the data. If escape isn't 0, then escape
* followed by anything (except \0) is a data character too.
*
* The combination of e_strings and del_quotes both TRUE is not currently
* handled. This could be fixed but it's not needed anywhere at the moment.
*
* Note that the string s is _not_ overwritten in this implementation.
*
* NB: it's okay to vary delim, quote, and escape from one call to the
* next on a single source string, but changing whitespace is a bad idea
* since you might lose data.
*/
char *
strtokx(const char *s,
const char *whitespace,
const char *delim,
const char *quote,
char escape,
bool e_strings,
bool del_quotes,
int encoding)
{
static char *storage = NULL;/* store the local copy of the user's string
* here */
static char *string = NULL; /* pointer into storage where to continue on
* next call */
/* variously abused variables: */
unsigned int offset;
char *start;
char *p;
if (s)
{
free(storage);
/*
* We may need extra space to insert delimiter nulls for adjacent
* tokens. 2X the space is a gross overestimate, but it's unlikely
* that this code will be used on huge strings anyway.
*/
storage = pg_malloc(2 * strlen(s) + 1);
strcpy(storage, s);
string = storage;
}
if (!storage)
return NULL;
/* skip leading whitespace */
offset = strspn(string, whitespace);
start = &string[offset];
/* end of string reached? */
if (*start == '\0')
{
/* technically we don't need to free here, but we're nice */
free(storage);
storage = NULL;
string = NULL;
return NULL;
}
/* test if delimiter character */
if (delim && strchr(delim, *start))
{
/*
* If not at end of string, we need to insert a null to terminate the
* returned token. We can just overwrite the next character if it
* happens to be in the whitespace set ... otherwise move over the
* rest of the string to make room. (This is why we allocated extra
* space above).
*/
p = start + 1;
if (*p != '\0')
{
if (!strchr(whitespace, *p))
//......... part of the code omitted here .........
Developer: gservera, Project: baseten, Lines: 101, Source file: stringutils.c
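A hypothetical use of strtokx() based on the calling convention described in the comment above: whitespace separates tokens, single quotes group a token, and del_quotes strips the quotes from the returned value. PG_UTF8 is assumed to be available from the PostgreSQL headers; real callers in psql pass the active client encoding instead.
#include <stdbool.h>
#include <stdio.h>
static void
tokenize_example(void)
{
	char	   *tok;
	for (tok = strtokx("copy mytab from 'data file.txt' csv",
					   " \t\n\r", NULL, "'", '\\', false, true, PG_UTF8);
		 tok != NULL;
		 tok = strtokx(NULL, " \t\n\r", NULL, "'", '\\', false, true, PG_UTF8))
		printf("token: |%s|\n", tok);	/* 'data file.txt' comes back as a single token */
}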
Example 7: main
/*
*
* main
*
*/
int
main(int argc, char *argv[])
{
struct adhoc_opts options;
int successResult;
char *password = NULL;
char *password_prompt = NULL;
bool new_pass;
set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("psql"));
if (argc > 1)
{
if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
{
usage();
exit(EXIT_SUCCESS);
}
if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
{
showVersion();
exit(EXIT_SUCCESS);
}
}
#ifdef WIN32
setvbuf(stderr, NULL, _IONBF, 0);
#endif
setup_cancel_handler();
pset.progname = get_progname(argv[0]);
pset.db = NULL;
setDecimalLocale();
pset.encoding = PQenv2encoding();
pset.queryFout = stdout;
pset.queryFoutPipe = false;
pset.cur_cmd_source = stdin;
pset.cur_cmd_interactive = false;
/* We rely on unmentioned fields of pset.popt to start out 0/false/NULL */
pset.popt.topt.format = PRINT_ALIGNED;
pset.popt.topt.border = 1;
pset.popt.topt.pager = 1;
pset.popt.topt.start_table = true;
pset.popt.topt.stop_table = true;
pset.popt.default_footer = true;
/* We must get COLUMNS here before readline() sets it */
pset.popt.topt.env_columns = getenv("COLUMNS") ? atoi(getenv("COLUMNS")) : 0;
pset.notty = (!isatty(fileno(stdin)) || !isatty(fileno(stdout)));
pset.getPassword = TRI_DEFAULT;
EstablishVariableSpace();
SetVariable(pset.vars, "VERSION", PG_VERSION_STR);
/* Default values for variables */
SetVariableBool(pset.vars, "AUTOCOMMIT");
SetVariable(pset.vars, "VERBOSITY", "default");
SetVariable(pset.vars, "PROMPT1", DEFAULT_PROMPT1);
SetVariable(pset.vars, "PROMPT2", DEFAULT_PROMPT2);
SetVariable(pset.vars, "PROMPT3", DEFAULT_PROMPT3);
parse_psql_options(argc, argv, &options);
if (!pset.popt.topt.fieldSep)
pset.popt.topt.fieldSep = pg_strdup(DEFAULT_FIELD_SEP);
if (!pset.popt.topt.recordSep)
pset.popt.topt.recordSep = pg_strdup(DEFAULT_RECORD_SEP);
if (options.username == NULL)
password_prompt = pg_strdup(_("Password: "));
else
{
password_prompt = pg_malloc(strlen(_("Password for user %s: ")) - 2 +
strlen(options.username) + 1);
sprintf(password_prompt, _("Password for user %s: "),
options.username);
}
if (pset.getPassword == TRI_YES)
password = simple_prompt(password_prompt, 100, false);
/* loop until we have a password if requested by backend */
do
{
#define PARAMS_ARRAY_SIZE 7
const char **keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords));
const char **values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values));
keywords[0] = "host";
values[0] = options.host;
//......... part of the code omitted here .........
Developer: badalex, Project: postgresql-scratchpad, Lines: 101, Source file: startup.c
Example 8: vacuum_one_database
//......... part of the code omitted here .........
{
appendPQExpBufferStr(&buf,
fmtQualifiedId(PQgetvalue(res, i, 1),
PQgetvalue(res, i, 0)));
if (tables_listed && !PQgetisnull(res, i, 2))
appendPQExpBufferStr(&buf, PQgetvalue(res, i, 2));
simple_string_list_append(&dbtables, buf.data);
resetPQExpBuffer(&buf);
}
termPQExpBuffer(&buf);
PQclear(res);
/*
* If there are more connections than vacuumable relations, we don't need
* to use them all.
*/
if (parallel)
{
if (concurrentCons > ntups)
concurrentCons = ntups;
if (concurrentCons <= 1)
parallel = false;
}
/*
* Setup the database connections. We reuse the connection we already have
* for the first slot. If not in parallel mode, the first slot in the
* array contains the connection.
*/
if (concurrentCons <= 0)
concurrentCons = 1;
slots = (ParallelSlot *) pg_malloc(sizeof(ParallelSlot) * concurrentCons);
init_slot(slots, conn);
if (parallel)
{
for (i = 1; i < concurrentCons; i++)
{
conn = connectDatabase(dbname, host, port, username, prompt_password,
progname, echo, false, true);
init_slot(slots + i, conn);
}
}
/*
* Prepare all the connections to run the appropriate analyze stage, if
* caller requested that mode.
*/
if (stage != ANALYZE_NO_STAGE)
{
int j;
/* We already emitted the message above */
for (j = 0; j < concurrentCons; j++)
executeCommand((slots + j)->connection,
stage_commands[stage], progname, echo);
}
initPQExpBuffer(&sql);
cell = dbtables.head;
do
{
const char *tabname = cell->val;
Developer: MasahikoSawada, Project: postgresql, Lines: 67, Source file: vacuumdb.c
Example 9: SetWALSegSize
/*
* Try to set the wal segment size from the WAL file specified by WALFilePath.
*
* Return true if size could be determined, false otherwise.
*/
static bool
SetWALSegSize(void)
{
bool ret_val = false;
int fd;
/* malloc this buffer to ensure sufficient alignment: */
char *buf = (char *) pg_malloc(XLOG_BLCKSZ);
Assert(WalSegSz == -1);
if ((fd = open(WALFilePath, O_RDWR, 0)) < 0)
{
fprintf(stderr, "%s: could not open WAL file \"%s\": %s\n",
progname, WALFilePath, strerror(errno));
pg_free(buf);
return false;
}
errno = 0;
if (read(fd, buf, XLOG_BLCKSZ) == XLOG_BLCKSZ)
{
XLogLongPageHeader longhdr = (XLogLongPageHeader) buf;
WalSegSz = longhdr->xlp_seg_size;
if (IsValidWalSegSize(WalSegSz))
{
/* successfully retrieved WAL segment size */
ret_val = true;
}
else
fprintf(stderr,
"%s: WAL segment size must be a power of two between 1MB and 1GB, but the WAL file header specifies %d bytes\n",
progname, WalSegSz);
}
else
{
/*
* Don't complain loudly, this is to be expected for segments being
* created.
*/
if (errno != 0)
{
if (debug)
fprintf(stderr, "could not read file \"%s\": %s\n",
WALFilePath, strerror(errno));
}
else
{
if (debug)
fprintf(stderr, "not enough data in file \"%s\"\n",
WALFilePath);
}
}
fflush(stderr);
close(fd);
pg_free(buf);
return ret_val;
}
Developer: AmiGanguli, Project: postgres, Lines: 67, Source file: pg_standby.c
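The IsValidWalSegSize() macro used above comes from the PostgreSQL headers and accepts only powers of two between 1 MB and 1 GB. A rough standalone equivalent of that check, for illustration:
#include <stdbool.h>
static bool
is_valid_wal_seg_size(long size)
{
	return size >= 1024L * 1024L &&			/* at least 1 MB */
		size <= 1024L * 1024L * 1024L &&	/* at most 1 GB */
		(size & (size - 1)) == 0;			/* power of two */
}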
Example 10: gen_db_file_maps
/*
* gen_db_file_maps()
*
* generates database mappings for "old_db" and "new_db". Returns a malloc'ed
* array of mappings. nmaps is a return parameter giving the number of
* mappings.
*
* NOTE: It is the caller's responsibility to free the returned array.
*/
FileNameMap *
gen_db_file_maps(migratorContext *ctx, DbInfo *old_db, DbInfo *new_db,
int *nmaps, const char *old_pgdata, const char *new_pgdata)
{
FileNameMap *maps;
int relnum;
int num_maps = 0;
maps = (FileNameMap *) pg_malloc(ctx, sizeof(FileNameMap) *
new_db->rel_arr.nrels);
for (relnum = 0; relnum < new_db->rel_arr.nrels; relnum++)
{
RelInfo *newrel = &new_db->rel_arr.rels[relnum];
RelInfo *oldrel;
/* toast tables are handled by their parent */
if (strcmp(newrel->nspname, "pg_toast") == 0)
continue;
oldrel = relarr_lookup_rel(ctx, &(old_db->rel_arr), newrel->nspname,
newrel->relname, CLUSTER_OLD);
map_rel(ctx, oldrel, newrel, old_db, new_db, old_pgdata, new_pgdata,
maps + num_maps);
num_maps++;
/*
* so much for the mapping of this relation. Now we need a mapping for
* its corresponding toast relation if any.
*/
if (oldrel->toastrelid > 0)
{
RelInfo *new_toast;
RelInfo *old_toast;
char new_name[MAXPGPATH];
char old_name[MAXPGPATH];
/* construct the new and old relnames for the toast relation */
snprintf(old_name, sizeof(old_name), "pg_toast_%u",
oldrel->reloid);
snprintf(new_name, sizeof(new_name), "pg_toast_%u",
newrel->reloid);
/* look them up in their respective arrays */
old_toast = relarr_lookup_reloid(ctx, &old_db->rel_arr,
oldrel->toastrelid, CLUSTER_OLD);
new_toast = relarr_lookup_rel(ctx, &new_db->rel_arr,
"pg_toast", new_name, CLUSTER_NEW);
/* finally create a mapping for them */
map_rel(ctx, old_toast, new_toast, old_db, new_db, old_pgdata, new_pgdata,
maps + num_maps);
num_maps++;
/*
* also need to provide a mapping for the index of this toast
* relation. The procedure is similar to what we did above for
* toast relation itself, the only difference being that the
* relnames need to be appended with _index.
*/
/*
* construct the new and old relnames for the toast index
* relations
*/
snprintf(old_name, sizeof(old_name), "%s_index", old_toast->relname);
snprintf(new_name, sizeof(new_name), "pg_toast_%u_index",
newrel->reloid);
/* look them up in their respective arrays */
old_toast = relarr_lookup_rel(ctx, &old_db->rel_arr,
"pg_toast", old_name, CLUSTER_OLD);
new_toast = relarr_lookup_rel(ctx, &new_db->rel_arr,
"pg_toast", new_name, CLUSTER_NEW);
/* finally create a mapping for them */
map_rel(ctx, old_toast, new_toast, old_db, new_db, old_pgdata,
new_pgdata, maps + num_maps);
num_maps++;
}
}
*nmaps = num_maps;
return maps;
}
Developer: gluefinance, Project: postgres, Lines: 95, Source file: info.c
Example 11: ReadDataFromArchiveZlib
static void
ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF)
{
z_streamp zp;
char *out;
int res = Z_OK;
size_t cnt;
char *buf;
size_t buflen;
zp = (z_streamp) pg_malloc(sizeof(z_stream));
zp->zalloc = Z_NULL;
zp->zfree = Z_NULL;
zp->opaque = Z_NULL;
buf = pg_malloc(ZLIB_IN_SIZE);
buflen = ZLIB_IN_SIZE;
out = pg_malloc(ZLIB_OUT_SIZE + 1);
if (inflateInit(zp) != Z_OK)
exit_horribly(modulename,
"could not initialize compression library: %s\n",
zp->msg);
/* no minimal chunk size for zlib */
while ((cnt = readF(AH, &buf, &buflen)))
{
zp->next_in = (void *) buf;
zp->avail_in = cnt;
while (zp->avail_in > 0)
{
zp->next_out = (void *) out;
zp->avail_out = ZLIB_OUT_SIZE;
res = inflate(zp, 0);
if (res != Z_OK && res != Z_STREAM_END)
exit_horribly(modulename,
"could not uncompress data: %s\n", zp->msg);
out[ZLIB_OUT_SIZE - zp->avail_out] = '\0';
ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH);
}
}
zp->next_in = NULL;
zp->avail_in = 0;
while (res != Z_STREAM_END)
{
zp->next_out = (void *) out;
zp->avail_out = ZLIB_OUT_SIZE;
res = inflate(zp, 0);
if (res != Z_OK && res != Z_STREAM_END)
exit_horribly(modulename,
"could not uncompress data: %s\n", zp->msg);
out[ZLIB_OUT_SIZE - zp->avail_out] = '\0';
ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH);
}
if (inflateEnd(zp) != Z_OK)
exit_horribly(modulename,
"could not close compression library: %s\n", zp->msg);
free(buf);
free(out);
free(zp);
}
Developer: devmario, Project: postgres, Lines: 69, Source file: compress_io.c
Example 12: receiveFileChunks
/*----
* Runs a query, which returns pieces of files from the remote source data
* directory, and overwrites the corresponding parts of target files with
* the received parts. The result set is expected to be of format:
*
* path text -- path in the data directory, e.g. "base/1/123"
* begin int8 -- offset within the file
* chunk bytea -- file content
*----
*/
static void
receiveFileChunks(const char *sql)
{
PGresult *res;
if (PQsendQueryParams(conn, sql, 0, NULL, NULL, NULL, NULL, 1) != 1)
pg_fatal("could not send query: %s", PQerrorMessage(conn));
pg_log(PG_DEBUG, "getting file chunks\n");
if (PQsetSingleRowMode(conn) != 1)
pg_fatal("could not set libpq connection to single row mode\n");
while ((res = PQgetResult(conn)) != NULL)
{
char *filename;
int filenamelen;
int64 chunkoff;
char chunkoff_str[32];
int chunksize;
char *chunk;
switch (PQresultStatus(res))
{
case PGRES_SINGLE_TUPLE:
break;
case PGRES_TUPLES_OK:
PQclear(res);
continue; /* final zero-row result */
default:
pg_fatal("unexpected result while fetching remote files: %s",
PQresultErrorMessage(res));
}
/* sanity check the result set */
if (PQnfields(res) != 3 || PQntuples(res) != 1)
pg_fatal("unexpected result set size while fetching remote files\n");
if (PQftype(res, 0) != TEXTOID ||
PQftype(res, 1) != INT8OID ||
PQftype(res, 2) != BYTEAOID)
{
pg_fatal("unexpected data types in result set while fetching remote files: %u %u %u\n",
PQftype(res, 0), PQftype(res, 1), PQftype(res, 2));
}
if (PQfformat(res, 0) != 1 &&
PQfformat(res, 1) != 1 &&
PQfformat(res, 2) != 1)
{
pg_fatal("unexpected result format while fetching remote files\n");
}
if (PQgetisnull(res, 0, 0) ||
PQgetisnull(res, 0, 1))
{
pg_fatal("unexpected null values in result while fetching remote files\n");
}
if (PQgetlength(res, 0, 1) != sizeof(int64))
pg_fatal("unexpected result length while fetching remote files\n");
/* Read result set to local variables */
memcpy(&chunkoff, PQgetvalue(res, 0, 1), sizeof(int64));
chunkoff = pg_recvint64(chunkoff);
chunksize = PQgetlength(res, 0, 2);
filenamelen = PQgetlength(res, 0, 0);
filename = pg_malloc(filenamelen + 1);
memcpy(filename, PQgetvalue(res, 0, 0), filenamelen);
filename[filenamelen] = '\0';
chunk = PQgetvalue(res, 0, 2);
/*
* If a file has been deleted on the source, remove it on the target
* as well. Note that multiple unlink() calls may happen on the same
* file if multiple data chunks are associated with it, hence ignore
* unconditionally anything missing. If this file is not a relation
* data file, then it has been already truncated when creating the
* file chunk list at the previous execution of the filemap.
*/
if (PQgetisnull(res, 0, 2))
{
pg_log(PG_DEBUG,
"received null value for chunk for file \"%s\", file has been deleted\n",
filename);
remove_target_file(filename, true);
//......... part of the code omitted here .........
Developer: bitnine-oss, Project: agens-graph, Lines: 101, Source file: libpq_fetch.c
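For orientation, the query passed to receiveFileChunks() must return exactly the three columns described in the header comment (path text, begin int8, chunk bytea), in binary result format. The sketch below only approximates what pg_rewind builds at run time; the fetchchunks table name and the pg_read_binary_file() call are assumptions made for illustration, not copied from the source.
/* illustrative only -- see the note above */
static const char *chunk_sql =
	"SELECT path, begin,\n"
	"       pg_read_binary_file(path, begin, len, true) AS chunk\n"
	"FROM fetchchunks";
static void
fetch_example(void)
{
	receiveFileChunks(chunk_sql);	/* overwrites the listed chunks in the target data directory */
}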
Example 13: main
int
main(int argc, char **argv)
{
struct options *my_opts;
PGconn *pgconn;
my_opts = (struct options *) pg_malloc(sizeof(struct options));
my_opts->oids = (eary *) pg_malloc(sizeof(eary));
my_opts->tables = (eary *) pg_malloc(sizeof(eary));
my_opts->filenodes = (eary *) pg_malloc(sizeof(eary));
my_opts->oids->num = my_opts->oids->alloc = 0;
my_opts->tables->num = my_opts->tables->alloc = 0;
my_opts->filenodes->num = my_opts->filenodes->alloc = 0;
/* parse the opts */
get_opts(argc, argv, my_opts);
if (my_opts->dbname == NULL)
{
my_opts->dbname = "postgres";
my_opts->nodb = true;
}
pgconn = sql_conn(my_opts);
/* display only tablespaces */
if (my_opts->tablespaces)
{
if (!my_opts->quiet)
printf("All tablespaces:\n");
sql_exec_dumpalltbspc(pgconn, my_opts);
PQfinish(pgconn);
exit(0);
}
/* display the given elements in the database */
if (my_opts->oids->num > 0 ||
my_opts->tables->num > 0 ||
my_opts->filenodes->num > 0)
{
if (!my_opts->quiet)
printf("From database \"%s\":\n", my_opts->dbname);
sql_exec_searchtables(pgconn, my_opts);
PQfinish(pgconn);
exit(0);
}
/* no elements given; dump the given database */
if (my_opts->dbname && !my_opts->nodb)
{
if (!my_opts->quiet)
printf("From database \"%s\":\n", my_opts->dbname);
sql_exec_dumpalltables(pgconn, my_opts);
PQfinish(pgconn);
exit(0);
}
/* no database either; dump all databases */
if (!my_opts->quiet)
printf("All databases:\n");
sql_exec_dumpalldbs(pgconn, my_opts);
PQfinish(pgconn);
return 0;
}
Developer: adam8157, Project: gpdb, Lines: 69, Source file: oid2name.c
Example 14: sql_exec_searchtables
/*
* Show oid, filenode, name, schema and tablespace for each of the
* given objects in the current database.
*/
void
sql_exec_searchtables(PGconn *conn, struct options * opts)
{
char *todo;
char *qualifiers,
*ptr;
char *comma_oids,
*comma_filenodes,
*comma_tables;
bool written = false;
char *addfields = ",c.oid AS \"Oid\", nspname AS \"Schema\", spcname as \"Tablespace\" ";
/* get tables qualifiers, whether names, filenodes, or OIDs */
comma_oids = get_comma_elts(opts->oids);
comma_tables = get_comma_elts(opts->tables);
comma_filenodes = get_comma_elts(opts->filenodes);
/* 80 extra chars for SQL expression */
qualifiers = (char *) pg_malloc(strlen(comma_oids) + strlen(comma_tables) +
strlen(comma_filenodes) + 80);
ptr = qualifiers;
if (opts->oids->num > 0)
{
ptr += sprintf(ptr, "c.oid IN (%s)", comma_oids);
written = true;
}
if (opts->filenodes->num > 0)
{
if (written)
ptr += sprintf(ptr, " OR ");
ptr += sprintf(ptr, "pg_catalog.pg_relation_filenode(c.oid) IN (%s)", comma_filenodes);
written = true;
}
if (opts->tables->num > 0)
{
if (written)
ptr += sprintf(ptr, " OR ");
sprintf(ptr, "c.relname ~~ ANY (ARRAY[%s])", comma_tables);
}
free(comma_oids);
free(comma_tables);
free(comma_filenodes);
/* now build the query */
todo = psprintf(
"SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
"FROM pg_catalog.pg_class c \n"
" LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \n"
" LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
" pg_catalog.pg_tablespace t \n"
"WHERE relkind IN ('r', 'm', 'i', 'S', 't') AND \n"
" t.oid = CASE\n"
" WHEN reltablespace <> 0 THEN reltablespace\n"
" ELSE dattablespace\n"
" END AND \n"
" (%s) \n"
"ORDER BY relname\n",
opts->extended ? addfields : "",
qualifiers);
free(qualifiers);
sql_exec(conn, todo, opts->quiet);
}
Developer: adam8157, Project: gpdb, Lines: 69, Source file: oid2name.c
Example 15: process_target_file
/*
* Callback for processing target file list.
*
* All source files must be already processed before calling this. This only
* marks target data directory's files that didn't exist in the source for
* deletion.
*/
void
process_target_file(const char *path, file_type_t type, size_t oldsize,
const char *link_target)
{
bool exists;
char localpath[MAXPGPATH];
struct stat statbuf;
file_entry_t key;
file_entry_t *key_ptr;
filemap_t *map = filemap;
file_entry_t *entry;
snprintf(localpath, sizeof(localpath), "%s/%s", datadir_target, path);
if (lstat(localpath, &statbuf) < 0)
{
if (errno != ENOENT)
pg_fatal("could not stat file \"%s\": %s\n",
localpath, strerror(errno));
exists = false;
}
if (map->array == NULL)
{
/* on first call, initialize lookup array */
if (map->nlist == 0)
{
/* should not happen */
pg_fatal("source file list is empty\n");
}
filemap_list_to_array(map);
Assert(map->array != NULL);
qsort(map->array, map->narray, sizeof(file_entry_t *), path_cmp);
}
/*
* Completely ignore some special files
*/
if (strcmp(path, "postmaster.pid") == 0 ||
strcmp(path, "postmaster.opts") == 0)
return;
/*
* Like in process_source_file, pretend that xlog is always a directory.
*/
if (strcmp(path, "pg_wal") == 0 && type == FILE_TYPE_SYMLINK)
type = FILE_TYPE_DIRECTORY;
key.path = (char *) path;
key_ptr = &key;
exists = (bsearch(&key_ptr, map->array, map->narray, sizeof(file_entry_t *),
path_cmp) != NULL);
/* Remove any file or folder that doesn't exist in the source system. */
if (!exists)
{
entry = pg_malloc(sizeof(file_entry_t));
entry->path = pg_strdup(path);
entry->type = type;
entry->action = FILE_ACTION_REMOVE;
entry->oldsize = oldsize;
entry->newsize = 0;
entry->link_target = link_target ? pg_strdup(link_target) : NULL;
entry->next = NULL;
entry->pagemap.bitmap = NULL;
entry->pagemap.bitmapsize = 0;
entry->isrelfile = isRelDataFile(path);
if (map->last == NULL)
map->first = entry;
else
map->last->next = entry;
map->last = entry;
map->nlist++;
}
else
{
/*
* We already handled all files that exist in the source system in
* process_source_file().
*/
}
}
Developer: bitnine-oss, Project: agens-graph, Lines: 93, Source file: filemap.c
Example 16: create_script_for_cluster_analyze
/*
* create_script_for_cluster_analyze()
*
* This incrementally generates better optimizer statistics
*/
void
create_script_for_cluster_analyze(char **analyze_script_file_name)
{
FILE *script = NULL;
*analyze_script_file_name = pg_malloc(MAXPGPATH);
prep_status("Creating script to analyze new cluster");
snprintf(*analyze_script_file_name, MAXPGPATH, "analyze_new_cluster.%s",
SCRIPT_EXT);
if ((script = fopen_priv(*analyze_script_file_name, "w")) == NULL)
pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
*analyze_script_file_name, getErrorText(errno));
#ifndef WIN32
/* add shebang header */
fprintf(script, "#!/bin/sh\n\n");
#endif
fprintf(script, "echo %sThis script will generate minimal optimizer statistics rapidly%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sso your system is usable, and then gather statistics twice more%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %swith increasing accuracy. When it is done, your system will%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %shave the default level of optimizer statistics.%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo\n\n");
fprintf(script, "echo %sIf you have used ALTER TABLE to modify the statistics target for%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sany tables, you might want to remove them and restore them after%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %srunning this script because they will delay fast statistics generation.%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo\n\n");
fprintf(script, "echo %sIf you would like default statistics as quickly as possible, cancel%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sthis script and run:%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %s vacuumdb --all %s%s\n", ECHO_QUOTE,
/* Did we copy the free space files? */
(GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
"--analyze-only" : "--analyze", ECHO_QUOTE);
fprintf(script, "echo\n\n");
#ifndef WIN32
fprintf(script, "sleep 2\n");
fprintf(script, "PGOPTIONS='-c default_statistics_target=1 -c vacuum_cost_delay=0'\n");
/* only need to export once */
fprintf(script, "export PGOPTIONS\n");
#else
fprintf(script, "REM simulate sleep 2\n");
fprintf(script, "PING 1.1.1.1 -n 1 -w 2000 > nul\n");
fprintf(script, "SET PGOPTIONS=-c default_statistics_target=1 -c vacuum_cost_delay=0\n");
#endif
fprintf(script, "echo %sGenerating minimal optimizer statistics (1 target)%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %s--------------------------------------------------%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "vacuumdb --all --analyze-only\n");
fprintf(script, "echo\n");
fprintf(script, "echo %sThe server is now available with minimal optimizer statistics.%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sQuery performance will be optimal once this script completes.%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo\n\n");
#ifndef WIN32
fprintf(script, "sleep 2\n");
fprintf(script, "PGOPTIONS='-c default_statistics_target=10'\n");
#else
fprintf(script, "REM simulate sleep\n");
fprintf(script, "PING 1.1.1.1 -n 1 -w 2000 > nul\n");
fprintf(script, "SET PGOPTIONS=-c default_statistics_target=10\n");
#endif
fprintf(script, "echo %sGenerating medium optimizer statistics (10 targets)%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %s---------------------------------------------------%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "vacuumdb --all --analyze-only\n");
fprintf(script, "echo\n\n");
#ifndef WIN32
fprintf(script, "unset PGOPTIONS\n");
#else
fprintf(script, "SET PGOPTIONS\n");
#endif
fprintf(script, "echo %sGenerating default (full) optimizer statistics (100 targets?)%s\n",
//......... part of the code omitted here .........
Developer: hqinnus, Project: postgres, Lines: 101, Source file: check.c
Example 17: process_source_file
//......... part of the code omitted here .........
)
{
/*
* It's a symbolic link in source, but not in target.
* Strange..
*/
pg_fatal("\"%s\" is not a symbolic link\n", localpath);
}
if (!exists)
action = FILE_ACTION_CREATE;
else
action = FILE_ACTION_NONE;
oldsize = 0;
break;
case FILE_TYPE_REGULAR:
if (exists && !S_ISREG(statbuf.st_mode))
pg_fatal("\"%s\" is not a regular file\n", localpath);
//......... rest of the code omitted .........