This article collects and summarizes typical usage examples of the PQgetvalue function in C++. If you have been struggling with questions such as how exactly to call PQgetvalue in C++, what it is used for, or where to find real-world examples of it, the hand-picked code samples below may help.
Below, 20 code examples of the PQgetvalue function are shown, sorted by popularity by default. You can up-vote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
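Before the examples, here is a minimal, self-contained sketch of the basic PQgetvalue call pattern. The connection string and the query are placeholders, not taken from any example below; the point is only the result-handling sequence: execute with PQexec, confirm the status is PGRES_TUPLES_OK, then address the result by row and column with PQgetvalue. The returned pointer is a NUL-terminated string owned by the PGresult and is freed by PQclear; use PQgetisnull to tell a SQL NULL apart from an empty string.

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    /* placeholder connection string -- adjust for your environment */
    PGconn     *conn = PQconnectdb("dbname=postgres");
    PGresult   *res;
    int         row;
    int         col;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    res = PQexec(conn, "SELECT datname, datallowconn FROM pg_database");
    if (PQresultStatus(res) != PGRES_TUPLES_OK)
    {
        fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
        PQclear(res);
        PQfinish(conn);
        return 1;
    }

    /* PQgetvalue(res, row, col) returns a NUL-terminated string owned by res */
    for (row = 0; row < PQntuples(res); row++)
    {
        for (col = 0; col < PQnfields(res); col++)
            printf("%s%s", PQgetvalue(res, row, col),
                   (col == PQnfields(res) - 1) ? "\n" : "\t");
    }

    PQclear(res);               /* also frees the strings returned by PQgetvalue */
    PQfinish(conn);
    return 0;
}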
Example 1: StandbyMonitor
/*
* Insert monitoring info: essentially the current time, the xlog positions
* received and applied on the standby, and the current xlog location on the primary.
* Also do the math to see how far behind, in bytes, we are from being up to date.
*/
static void
StandbyMonitor(void)
{
PGresult *res;
char monitor_standby_timestamp[MAXLEN];
char last_wal_primary_location[MAXLEN];
char last_wal_standby_received[MAXLEN];
char last_wal_standby_applied[MAXLEN];
unsigned long long int lsn_primary;
unsigned long long int lsn_standby_received;
unsigned long long int lsn_standby_applied;
int connection_retries;
/*
* Check if the master is still available, if after 5 minutes of retries
* we cannot reconnect, try to get a new master.
*/
CheckPrimaryConnection(); // this takes up to NUM_RETRY * SLEEP_RETRY seconds
if (PQstatus(primaryConn) != CONNECTION_OK)
{
if (local_options.failover == MANUAL_FAILOVER)
{
log_err(_("We couldn't reconnect to master. Now checking if another node has been promoted.\n"));
for (connection_retries = 0; connection_retries < 6; connection_retries++)
{
primaryConn = getMasterConnection(myLocalConn, repmgr_schema, local_options.node,
local_options.cluster_name, &primary_options.node, NULL);
if (PQstatus(primaryConn) == CONNECTION_OK)
{
/* Connected, we can continue the process so break the loop */
log_err(_("Connected to node %d, continue monitoring.\n"), primary_options.node);
break;
}
else
{
log_err(_("We haven't found a new master, waiting before retry...\n"));
/* wait 5 minutes before retries, after 6 failures (30 minutes) we stop trying */
sleep(300);
}
}
if (PQstatus(primaryConn) != CONNECTION_OK)
{
log_err(_("We couldn't reconnect for long enough, exiting...\n"));
exit(ERR_DB_CON);
}
}
else if (local_options.failover == AUTOMATIC_FAILOVER)
{
/*
* When we return from this function we will have a new primary and
* a new primaryConn
*/
do_failover();
}
}
/* Check if we are still a standby; we could have been promoted */
if (!is_standby(myLocalConn))
{
log_err(_("It seems like we have been promoted, so exit from monitoring...\n"));
CloseConnections();
exit(ERR_PROMOTED);
}
/*
* First check whether a command is being executed,
* and if that is the case, cancel the query so we can
* insert the current record.
*/
if (PQisBusy(primaryConn) == 1)
CancelQuery();
/* Get local xlog info */
sqlquery_snprintf(
sqlquery,
"SELECT CURRENT_TIMESTAMP, pg_last_xlog_receive_location(), "
"pg_last_xlog_replay_location()");
res = PQexec(myLocalConn, sqlquery);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
log_err(_("PQexec failed: %s\n"), PQerrorMessage(myLocalConn));
PQclear(res);
/* if there is any error just let it be and retry in next loop */
return;
}
strncpy(monitor_standby_timestamp, PQgetvalue(res, 0, 0), MAXLEN);
strncpy(last_wal_standby_received, PQgetvalue(res, 0, 1), MAXLEN);
strncpy(last_wal_standby_applied, PQgetvalue(res, 0, 2), MAXLEN);
PQclear(res);
//......... part of the code omitted here .........
Developer ID: klando, Project: repmgr, Lines: 101, Source file: repmgrd.c
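Example 1 above copies each PQgetvalue result directly into a fixed-size buffer with strncpy. As a small, hypothetical variation on that pattern (it reuses MAXLEN and the first column of the same query purely for illustration and is not code from repmgr), one can check PQgetisnull first and force NUL termination, since strncpy does not terminate the destination when the source fills the buffer:

char monitor_standby_timestamp[MAXLEN];

if (!PQgetisnull(res, 0, 0))
{
    /* copy at most MAXLEN - 1 bytes and terminate explicitly */
    strncpy(monitor_standby_timestamp, PQgetvalue(res, 0, 0), MAXLEN - 1);
    monitor_standby_timestamp[MAXLEN - 1] = '\0';
}
else
{
    /* SQL NULL: PQgetvalue would return "" here anyway, but make it explicit */
    monitor_standby_timestamp[0] = '\0';
}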
Example 2: lo_initialize
/*
* lo_initialize
*
* Initialize the large object interface for an existing connection.
* We ask the backend about the function OIDs in pg_proc for all
* functions that are required for large object operations.
*/
static int
lo_initialize(PGconn *conn)
{
PGresult *res;
PGlobjfuncs *lobjfuncs;
int n;
const char *query;
const char *fname;
Oid foid;
/*
* Allocate the structure to hold the function OIDs
*/
lobjfuncs = (PGlobjfuncs *) malloc(sizeof(PGlobjfuncs));
if (lobjfuncs == NULL)
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("out of memory\n"));
return -1;
}
MemSet((char *) lobjfuncs, 0, sizeof(PGlobjfuncs));
/*
* Execute the query to get all the functions at once. In 7.3 and later
* we need to be schema-safe. lo_create only exists in 8.1 and up.
* lo_truncate only exists in 8.3 and up.
*/
if (conn->sversion >= 70300)
query = "select proname, oid from pg_catalog.pg_proc "
"where proname in ("
"'lo_open', "
"'lo_close', "
"'lo_creat', "
"'lo_create', "
"'lo_unlink', "
"'lo_lseek', "
"'lo_tell', "
"'lo_truncate', "
"'loread', "
"'lowrite') "
"and pronamespace = (select oid from pg_catalog.pg_namespace "
"where nspname = 'pg_catalog')";
else
query = "select proname, oid from pg_proc "
"where proname = 'lo_open' "
"or proname = 'lo_close' "
"or proname = 'lo_creat' "
"or proname = 'lo_unlink' "
"or proname = 'lo_lseek' "
"or proname = 'lo_tell' "
"or proname = 'loread' "
"or proname = 'lowrite'";
res = PQexec(conn, query);
if (res == NULL)
{
free(lobjfuncs);
return -1;
}
if (res->resultStatus != PGRES_TUPLES_OK)
{
free(lobjfuncs);
PQclear(res);
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("query to initialize large object functions did not return data\n"));
return -1;
}
/*
* Examine the result and put the OIDs into the struct
*/
for (n = 0; n < PQntuples(res); n++)
{
fname = PQgetvalue(res, n, 0);
foid = (Oid) atoi(PQgetvalue(res, n, 1));
if (strcmp(fname, "lo_open") == 0)
lobjfuncs->fn_lo_open = foid;
else if (strcmp(fname, "lo_close") == 0)
lobjfuncs->fn_lo_close = foid;
else if (strcmp(fname, "lo_creat") == 0)
lobjfuncs->fn_lo_creat = foid;
else if (strcmp(fname, "lo_create") == 0)
lobjfuncs->fn_lo_create = foid;
else if (strcmp(fname, "lo_unlink") == 0)
lobjfuncs->fn_lo_unlink = foid;
else if (strcmp(fname, "lo_lseek") == 0)
lobjfuncs->fn_lo_lseek = foid;
else if (strcmp(fname, "lo_tell") == 0)
lobjfuncs->fn_lo_tell = foid;
else if (strcmp(fname, "lo_truncate") == 0)
lobjfuncs->fn_lo_truncate = foid;
else if (strcmp(fname, "loread") == 0)
//......... part of the code omitted here .........
Developer ID: avontd2868, Project: postgres, Lines: 101, Source file: fe-lobj.c
Example 3: new_9_0_populate_pg_largeobject_metadata
/*
* new_9_0_populate_pg_largeobject_metadata()
* new >= 9.0, old <= 8.4
* 9.0 has a new pg_largeobject permission table
*/
void
new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode)
{
int dbnum;
FILE *script = NULL;
bool found = false;
char output_path[MAXPGPATH];
prep_status("Checking for large objects");
snprintf(output_path, sizeof(output_path), "pg_largeobject.sql");
for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
{
PGresult *res;
int i_count;
DbInfo *active_db = &cluster->dbarr.dbs[dbnum];
PGconn *conn = connectToServer(cluster, active_db->db_name);
/* find if there are any large objects */
res = executeQueryOrDie(conn,
"SELECT count(*) "
"FROM pg_catalog.pg_largeobject ");
i_count = PQfnumber(res, "count");
if (atoi(PQgetvalue(res, 0, i_count)) != 0)
{
found = true;
if (!check_mode)
{
PQExpBufferData connectbuf;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
pg_fatal("could not open file \"%s\": %s\n", output_path,
strerror(errno));
initPQExpBuffer(&connectbuf);
appendPsqlMetaConnect(&connectbuf, active_db->db_name);
fputs(connectbuf.data, script);
termPQExpBuffer(&connectbuf);
fprintf(script,
"SELECT pg_catalog.lo_create(t.loid)\n"
"FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n");
}
}
PQclear(res);
PQfinish(conn);
}
if (script)
fclose(script);
if (found)
{
report_status(PG_WARNING, "warning");
if (check_mode)
pg_log(PG_WARNING, "\n"
"Your installation contains large objects. The new database has an\n"
"additional large object permission table. After upgrading, you will be\n"
"given a command to populate the pg_largeobject_metadata table with\n"
"default permissions.\n\n");
else
pg_log(PG_WARNING, "\n"
"Your installation contains large objects. The new database has an\n"
"additional large object permission table, so default permissions must be\n"
"defined for all large objects. The file\n"
" %s\n"
"when executed by psql by the database superuser will set the default\n"
"permissions.\n\n",
output_path);
}
else
check_ok();
}
Developer ID: AmiGanguli, Project: postgres, Lines: 81, Source file: version.c
Example 4: StreamLog
/*
* Start the log streaming
*/
static void
StreamLog(void)
{
PGresult *res;
uint32 timeline;
XLogRecPtr startpos;
int minServerMajor,
maxServerMajor;
int serverMajor;
/*
* Connect in replication mode to the server
*/
conn = GetConnection();
if (!conn)
/* Error message already written in GetConnection() */
return;
/*
* Check server version. IDENTIFY_SYSTEM didn't return the current xlog
* position before 9.1, so we can't work with servers older than 9.1. And
* we don't support servers newer than the client.
*/
minServerMajor = 901;
maxServerMajor = PG_VERSION_NUM / 100;
serverMajor = PQserverVersion(conn) / 100;
if (serverMajor < minServerMajor || serverMajor > maxServerMajor)
{
fprintf(stderr, _("%s: unsupported server version %s\n"),
progname, PQparameterStatus(conn, "server_version"));
disconnect_and_exit(1);
}
/*
* Run IDENTIFY_SYSTEM so we can get the timeline and current xlog
* position.
*/
res = PQexec(conn, "IDENTIFY_SYSTEM");
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
fprintf(stderr, _("%s: could not send replication command \"%s\": %s"),
progname, "IDENTIFY_SYSTEM", PQerrorMessage(conn));
disconnect_and_exit(1);
}
if (PQntuples(res) != 1 || PQnfields(res) != 3)
{
fprintf(stderr,
_("%s: could not identify system: got %d rows and %d fields, expected %d rows and %d fields\n"),
progname, PQntuples(res), PQnfields(res), 1, 3);
disconnect_and_exit(1);
}
timeline = atoi(PQgetvalue(res, 0, 1));
if (sscanf(PQgetvalue(res, 0, 2), "%X/%X", &startpos.xlogid, &startpos.xrecoff) != 2)
{
fprintf(stderr,
_("%s: could not parse transaction log location \"%s\"\n"),
progname, PQgetvalue(res, 0, 2));
disconnect_and_exit(1);
}
PQclear(res);
/*
* Figure out where to start streaming.
*/
startpos = FindStreamingStart(startpos, timeline);
/*
* Always start streaming at the beginning of a segment
*/
startpos.xrecoff -= startpos.xrecoff % XLOG_SEG_SIZE;
/*
* Start the replication
*/
if (verbose)
fprintf(stderr,
_("%s: starting log streaming at %X/%X (timeline %u)\n"),
progname, startpos.xlogid, startpos.xrecoff, timeline);
ReceiveXlogStream(conn, startpos, timeline, NULL, basedir,
stop_streaming, standby_message_timeout, false);
PQfinish(conn);
}
Developer ID: pgresql, Project: postgres-xl, Lines: 89, Source file: pg_receivexlog.c
Example 5: vacuum_all_databases
/*
* Vacuum/analyze all connectable databases.
*
* In analyze-in-stages mode, we process all databases in one stage before
* moving on to the next stage. That ensures minimal stats are available
* quickly everywhere before generating more detailed ones.
*/
static void
vacuum_all_databases(vacuumingOptions *vacopts,
bool analyze_in_stages,
const char *maintenance_db, const char *host,
const char *port, const char *username,
enum trivalue prompt_password,
int concurrentCons,
const char *progname, bool echo, bool quiet)
{
PGconn *conn;
PGresult *result;
PQExpBufferData connstr;
int stage;
int i;
conn = connectMaintenanceDatabase(maintenance_db, host, port, username,
prompt_password, progname, echo);
result = executeQuery(conn,
"SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;",
progname, echo);
PQfinish(conn);
initPQExpBuffer(&connstr);
if (analyze_in_stages)
{
/*
* When analyzing all databases in stages, we analyze them all in the
* fastest stage first, so that initial statistics become available
* for all of them as soon as possible.
*
* This means we establish several times as many connections, but
* that's a secondary consideration.
*/
for (stage = 0; stage < ANALYZE_NUM_STAGES; stage++)
{
for (i = 0; i < PQntuples(result); i++)
{
resetPQExpBuffer(&connstr);
appendPQExpBuffer(&connstr, "dbname=");
appendConnStrVal(&connstr, PQgetvalue(result, i, 0));
vacuum_one_database(connstr.data, vacopts,
stage,
NULL,
host, port, username, prompt_password,
concurrentCons,
progname, echo, quiet);
}
}
}
else
{
for (i = 0; i < PQntuples(result); i++)
{
resetPQExpBuffer(&connstr);
appendPQExpBuffer(&connstr, "dbname=");
appendConnStrVal(&connstr, PQgetvalue(result, i, 0));
vacuum_one_database(connstr.data, vacopts,
ANALYZE_NO_STAGE,
NULL,
host, port, username, prompt_password,
concurrentCons,
progname, echo, quiet);
}
}
termPQExpBuffer(&connstr);
PQclear(result);
}
Developer ID: bjornharrtell, Project: postgres, Lines: 77, Source file: vacuumdb.c
Example 6: get_loadable_libraries
/*
* get_loadable_libraries()
*
* Fetch the names of all old libraries containing C-language functions.
* We will later check that they all exist in the new installation.
*/
void
get_loadable_libraries(void)
{
PGresult **ress;
int totaltups;
int dbnum;
ress = (PGresult **) pg_malloc(old_cluster.dbarr.ndbs * sizeof(PGresult *));
totaltups = 0;
/* Fetch all library names, removing duplicates within each DB */
for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
{
DbInfo *active_db = &old_cluster.dbarr.dbs[dbnum];
PGconn *conn = connectToServer(&old_cluster, active_db->db_name);
/*
* Fetch all libraries referenced in this DB. We can't exclude
* the "pg_catalog" schema because, while such functions are not
* explicitly dumped by pg_dump, they do reference implicit objects
* that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
*/
ress[dbnum] = executeQueryOrDie(conn,
"SELECT DISTINCT probin "
"FROM pg_catalog.pg_proc "
"WHERE prolang = 13 /* C */ AND "
"probin IS NOT NULL AND "
"oid >= %u;",
FirstNormalObjectId);
totaltups += PQntuples(ress[dbnum]);
PQfinish(conn);
}
totaltups++; /* reserve for pg_upgrade_support */
/* Allocate what's certainly enough space */
os_info.libraries = (char **) pg_malloc(totaltups * sizeof(char *));
/*
* Now remove duplicates across DBs. This is pretty inefficient code, but
* there probably aren't enough entries to matter.
*/
totaltups = 0;
os_info.libraries[totaltups++] = pg_strdup(PG_UPGRADE_SUPPORT);
for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
{
PGresult *res = ress[dbnum];
int ntups;
int rowno;
ntups = PQntuples(res);
for (rowno = 0; rowno < ntups; rowno++)
{
char *lib = PQgetvalue(res, rowno, 0);
bool dup = false;
int n;
for (n = 0; n < totaltups; n++)
{
if (strcmp(lib, os_info.libraries[n]) == 0)
{
dup = true;
break;
}
}
if (!dup)
os_info.libraries[totaltups++] = pg_strdup(lib);
}
PQclear(res);
}
os_info.num_libraries = totaltups;
pg_free(ress);
}
Developer ID: mlum, Project: postgres, Lines: 84, Source file: function.c
Example 7: get_rel_infos
//......... part of the code omitted here .........
/*
* And now we can write the query that retrieves the data we want for each
* heap and index relation. Make sure result is sorted by OID.
*/
snprintf(query + strlen(query), sizeof(query) - strlen(query),
"SELECT all_rels.*, n.nspname, c.relname, "
" c.relfilenode, c.reltablespace, %s "
"FROM (SELECT * FROM regular_heap "
" UNION ALL "
" SELECT * FROM toast_heap "
" UNION ALL "
" SELECT * FROM all_index) all_rels "
" JOIN pg_catalog.pg_class c "
" ON all_rels.reloid = c.oid "
" JOIN pg_catalog.pg_namespace n "
" ON c.relnamespace = n.oid "
" LEFT OUTER JOIN pg_catalog.pg_tablespace t "
" ON c.reltablespace = t.oid "
"ORDER BY 1;",
/* 9.2 removed the pg_tablespace.spclocation column */
(GET_MAJOR_VERSION(cluster->major_version) >= 902) ?
"pg_catalog.pg_tablespace_location(t.oid) AS spclocation" :
"t.spclocation");
res = executeQueryOrDie(conn, "%s", query);
ntups = PQntuples(res);
relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups);
i_reloid = PQfnumber(res, "reloid");
i_indtable = PQfnumber(res, "indtable");
i_toastheap = PQfnumber(res, "toastheap");
i_nspname = PQfnumber(res, "nspname");
i_relname = PQfnumber(res, "relname");
i_relfilenode = PQfnumber(res, "relfilenode");
i_reltablespace = PQfnumber(res, "reltablespace");
i_spclocation = PQfnumber(res, "spclocation");
for (relnum = 0; relnum < ntups; relnum++)
{
RelInfo *curr = &relinfos[num_rels++];
curr->reloid = atooid(PQgetvalue(res, relnum, i_reloid));
curr->indtable = atooid(PQgetvalue(res, relnum, i_indtable));
curr->toastheap = atooid(PQgetvalue(res, relnum, i_toastheap));
nspname = PQgetvalue(res, relnum, i_nspname);
curr->nsp_alloc = false;
/*
* Many of the namespace and tablespace strings are identical, so we
* try to reuse the allocated string pointers where possible to reduce
* memory consumption.
*/
/* Can we reuse the previous string allocation? */
if (last_namespace && strcmp(nspname, last_namespace) == 0)
curr->nspname = last_namespace;
else
{
last_namespace = curr->nspname = pg_strdup(nspname);
curr->nsp_alloc = true;
}
relname = PQgetvalue(res, relnum, i_relname);
curr->relname = pg_strdup(relname);
curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));
curr->tblsp_alloc = false;
/* Is the tablespace oid non-default? */
if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0)
{
/*
* The tablespace location might be "", meaning the cluster
* default location, i.e. pg_default or pg_global.
*/
tablespace = PQgetvalue(res, relnum, i_spclocation);
/* Can we reuse the previous string allocation? */
if (last_tablespace && strcmp(tablespace, last_tablespace) == 0)
curr->tablespace = last_tablespace;
else
{
last_tablespace = curr->tablespace = pg_strdup(tablespace);
curr->tblsp_alloc = true;
}
}
else
/* A zero reltablespace oid indicates the database tablespace. */
curr->tablespace = dbinfo->db_tablespace;
}
PQclear(res);
PQfinish(conn);
dbinfo->rel_arr.rels = relinfos;
dbinfo->rel_arr.nrels = num_rels;
}
Developer ID: adityavs, Project: postgres, Lines: 101, Source file: info.c
Example 8: create_store_func
int create_store_func(PGconn *__con,char *place_num,char *store_name,char *password,char *__sendBuf){
char sql[BUFSIZE];
char sql0[BUFSIZE];
char sql1[BUFSIZE];
char sql2[BUFSIZE];
PGresult *res; //PGresult object
PGresult *res0; //PGresult object
int resultRows;
int max;
int max_row;
int store_num;
/* Build the SQL that inserts the new store */
sprintf(sql, "INSERT INTO store VALUES(%s, %s, '%s', '%s')","nextval ('item_code_seq')", place_num, store_name, password);
/* Execute the SQL command */
res = PQexec(__con, sql);
/* Check the result status of the SQL command */
if( PQresultStatus(res) != PGRES_COMMAND_OK){
printf("%s", PQresultErrorMessage(res));
sprintf(__sendBuf, "%s %s%s.\n", "ER_STAT", "E_CODE", "\n");
return -1;
}
sprintf(sql0, "select * from store");
/* Execute the SQL command */
res0 = PQexec(__con, sql0);
/* Check the result status of the SQL command */
if( PQresultStatus(res0) != PGRES_TUPLES_OK){
printf("%s", PQresultErrorMessage(res0));
sprintf(__sendBuf, "%s %s%s.\n", "ER_STAT2", "E_CODE", "\n");
return -1;
}
/*CREATE TABLE item_management*/
max_row = PQntuples(res0);
printf("t:%d\n", max_row); //test
store_num = atoi(PQgetvalue(res0, max_row-1, 0));
/* Build the CREATE TABLE SQL for the new store's item_management table */
sprintf(sql0, "CREATE TABLE item_management_%d (\
purchase_code serial,\
item_code serial,\
available_period bigint,\
inventory_num integer,\
sale_unit_price integer,\
profit integer,\
procurement_period bigint,\
order_system bool,\
purchase_day bigint,\
order_confirm bool,\
order_interval integer,\
safe_stock integer,\
PRIMARY KEY(purchase_code))"
, store_num);
/* Execute the SQL command */
res0 = PQexec(__con, sql0);
sprintf(__sendBuf, "%s%s.\n", "テーブルを登録しました", ENTER);
/* Build the CREATE TABLE SQL for the new store's tax table */
sprintf(sql1, "CREATE TABLE tax_%d(\
tax_rate real,\
mutual_tax_rate real)"
,store_num);
/* Execute the SQL command */
res0 = PQexec(__con, sql1);
sprintf(__sendBuf, "%s%s.\n", "テーブルを登録しました", ENTER);
/* Build the CREATE TABLE SQL for the new store's mod_info table */
sprintf(sql2, "CREATE TABLE mod_info_%d (\
slip_num serial,\
before_tax_rate real,\
before_multiple_tax_rate real,\
before_point_rate real,\
PRIMARY KEY(slip_num))"
, store_num);
/* Execute the SQL command */
res0 = PQexec(__con, sql2);
sprintf(__sendBuf, "%s%s.\n", "テーブルを登録しました", ENTER);
/* Free the storage allocated for the PGresult objects */
PQclear(res);
PQclear(res0);
return 0;
//......... part of the code omitted here .........
Developer ID: tachyonstone, Project: new_POS, Lines: 101, Source file: create_store.c
Example 9: v_find_nodes
//......... part of the code omitted here .........
size_t parmcount = 0;
char* fieldp = fieldbuf;
#define SET_FIELD(name, value) \
{ \
parms.set(parmcount++, value); \
fieldp += sprintf(fieldp, "\"" #name "\"=$%Zu AND ", parmcount); \
}
#define SET_FIELD_I(name, value) \
{ \
parms.set(parmcount++, value); \
fieldp += sprintf(fieldp, "LOWER(\"" #name "\")=LOWER($%Zu) AND ", parmcount); \
}
if (nodeTemplate.has_CreateTime())
SET_FIELD(CreateTime, nodeTemplate.m_CreateTime);
if (nodeTemplate.has_ModifyTime())
SET_FIELD(ModifyTime, nodeTemplate.m_ModifyTime);
if (nodeTemplate.has_CreateAgeName())
SET_FIELD(CreateAgeName, nodeTemplate.m_CreateAgeName);
if (nodeTemplate.has_CreateAgeUuid())
SET_FIELD(CreateAgeUuid, nodeTemplate.m_CreateAgeUuid.toString());
if (nodeTemplate.has_CreatorUuid())
SET_FIELD(CreatorUuid, nodeTemplate.m_CreatorUuid.toString());
if (nodeTemplate.has_CreatorIdx())
SET_FIELD(CreatorIdx, nodeTemplate.m_CreatorIdx);
if (nodeTemplate.has_NodeType())
SET_FIELD(NodeType, nodeTemplate.m_NodeType);
if (nodeTemplate.has_Int32_1())
SET_FIELD(Int32_1, nodeTemplate.m_Int32_1);
if (nodeTemplate.has_Int32_2())
SET_FIELD(Int32_2, nodeTemplate.m_Int32_2);
if (nodeTemplate.has_Int32_3())
SET_FIELD(Int32_3, nodeTemplate.m_Int32_3);
if (nodeTemplate.has_Int32_4())
SET_FIELD(Int32_4, nodeTemplate.m_Int32_4);
if (nodeTemplate.has_Uint32_1())
SET_FIELD(Uint32_1, nodeTemplate.m_Uint32_1);
if (nodeTemplate.has_Uint32_2())
SET_FIELD(Uint32_2, nodeTemplate.m_Uint32_2);
if (nodeTemplate.has_Uint32_3())
SET_FIELD(Uint32_3, nodeTemplate.m_Uint32_3);
if (nodeTemplate.has_Uint32_4())
SET_FIELD(Uint32_4, nodeTemplate.m_Uint32_4);
if (nodeTemplate.has_Uuid_1())
SET_FIELD(Uuid_1, nodeTemplate.m_Uuid_1.toString());
if (nodeTemplate.has_Uuid_2())
SET_FIELD(Uuid_2, nodeTemplate.m_Uuid_2.toString());
if (nodeTemplate.has_Uuid_3())
SET_FIELD(Uuid_3, nodeTemplate.m_Uuid_3.toString());
if (nodeTemplate.has_Uuid_4())
SET_FIELD(Uuid_4, nodeTemplate.m_Uuid_4.toString());
if (nodeTemplate.has_String64_1())
SET_FIELD(String64_1, nodeTemplate.m_String64_1);
if (nodeTemplate.has_String64_2())
SET_FIELD(String64_2, nodeTemplate.m_String64_2);
if (nodeTemplate.has_String64_3())
SET_FIELD(String64_3, nodeTemplate.m_String64_3);
if (nodeTemplate.has_String64_4())
SET_FIELD(String64_4, nodeTemplate.m_String64_4);
if (nodeTemplate.has_String64_5())
SET_FIELD(String64_5, nodeTemplate.m_String64_5);
if (nodeTemplate.has_String64_6())
SET_FIELD(String64_6, nodeTemplate.m_String64_6);
if (nodeTemplate.has_IString64_1())
SET_FIELD_I(IString64_1, nodeTemplate.m_IString64_1);
if (nodeTemplate.has_IString64_2())
SET_FIELD_I(IString64_2, nodeTemplate.m_IString64_2);
if (nodeTemplate.has_Text_1())
SET_FIELD(Text_1, nodeTemplate.m_Text_1);
if (nodeTemplate.has_Text_2())
SET_FIELD(Text_2, nodeTemplate.m_Text_2);
if (nodeTemplate.has_Blob_1())
SET_FIELD(Blob_1, DS::Base64Encode(nodeTemplate.m_Blob_1.buffer(), nodeTemplate.m_Blob_1.size()));
if (nodeTemplate.has_Blob_2())
SET_FIELD(Blob_2, DS::Base64Encode(nodeTemplate.m_Blob_2.buffer(), nodeTemplate.m_Blob_2.size()));
#undef SET_FIELD
#undef SET_FIELD_I
DS_DASSERT(parmcount > 0);
DS_DASSERT(fieldp - fieldbuf < 1024);
*(fieldp - 5) = 0; // Get rid of the last ' AND '
DS::String queryStr = "SELECT idx FROM vault.\"Nodes\"\n WHERE ";
queryStr += fieldbuf;
check_postgres();
PGresult* result = PQexecParams(s_postgres, queryStr.c_str(),
parmcount, 0, parms.m_values, 0, 0, 0);
if (PQresultStatus(result) != PGRES_TUPLES_OK) {
fprintf(stderr, "%s:%d:\n Postgres SELECT error: %s\n",
__FILE__, __LINE__, PQerrorMessage(s_postgres));
PQclear(result);
return false;
}
nodes.resize(PQntuples(result));
for (size_t i=0; i<nodes.size(); ++i)
nodes[i] = strtoul(PQgetvalue(result, i, 0), 0, 10);
PQclear(result);
return true;
}
Developer ID: TheEggman, Project: dirtsand, Lines: 101, Source file: AuthVault.cpp
Example 10: FmncQPrsList
bool PgTblFmncQPrsList::loadRec(
PGresult* res
, FmncQPrsList** rec
) {
char* ptr;
FmncQPrsList* _rec = NULL;
bool retval = false;
if (PQntuples(res) == 1) {
_rec = new FmncQPrsList();
int fnum[] = {
PQfnumber(res, "qref"),
PQfnumber(res, "jref"),
PQfnumber(res, "jnum"),
PQfnumber(res, "ref"),
PQfnumber(res, "title"),
PQfnumber(res, "firstname"),
PQfnumber(res, "lastname"),
PQfnumber(res, "grp"),
PQfnumber(res, "own"),
PQfnumber(res, "reffmncmorg"),
PQfnumber(res, "reffmncmaddress"),
PQfnumber(res, "ixvsex"),
PQfnumber(res, "tel"),
PQfnumber(res, "eml")
};
ptr = PQgetvalue(res, 0, fnum[0]); _rec->qref = atoll(ptr);
ptr = PQgetvalue(res, 0, fnum[1]); _rec->jref = atoll(ptr);
ptr = PQgetvalue(res, 0, fnum[2]); _rec->jnum = atol(ptr);
ptr = PQgetvalue(res, 0, fnum[3]); _rec->ref = atoll(ptr);
ptr = PQgetvalue(res, 0, fnum[4]); _rec->Title.assign(ptr, PQgetlength(res, 0, fnum[4]));
ptr = PQgetvalue(res, 0, fnum[5]); _rec->Firstname.assign(ptr, PQgetlength(res, 0, fnum[5]));
ptr = PQgetvalue(res, 0, fnum[6]); _rec->Lastname.assign(ptr, PQgetlength(res, 0, fnum[6]));
ptr = PQgetvalue(res, 0, fnum[7]); _rec->grp = atoll(ptr);
ptr = PQgetvalue(res, 0, fnum[8]); _rec->own = atoll(ptr);
ptr = PQgetvalue(res, 0, fnum[9]); _rec->refFmncMOrg = atoll(ptr);
ptr = PQgetvalue(res, 0, fnum[10]); _rec->refFmncMAddress = atoll(ptr);
ptr = PQgetvalue(res, 0, fnum[11]); _rec->ixVSex = atol(ptr);
ptr = PQgetvalue(res, 0, fnum[12]); _rec->Tel.assign(ptr, PQgetlength(res, 0, fnum[12]));
ptr = PQgetvalue(res, 0, fnum[13]); _rec->Eml.assign(ptr, PQgetlength(res, 0, fnum[13]));
retval = true;
};
PQclear(res);
*rec = _rec;
return retval;
};
Developer ID: epsitech, Project: fabmaniac, Lines: 52, Source file: FmncQPrsList.cpp
Example 11: PQntuples
ubigint PgTblFmncQPrsList::loadRst(
PGresult* res
, const bool append
, ListFmncQPrsList& rst
) {
ubigint numrow; ubigint numread = 0; char* ptr;
FmncQPrsList* rec;
if (!append) rst.clear();
numrow = PQntuples(res);
if (numrow > 0) {
rst.nodes.reserve(rst.nodes.size() + numrow);
int fnum[] = {
PQfnumber(res, "qref"),
PQfnumber(res, "jref"),
PQfnumber(res, "jnum"),
PQfnumber(res, "ref"),
PQfnumber(res, "title"),
PQfnumber(res, "firstname"),
PQfnumber(res, "lastname"),
PQfnumber(res, "grp"),
PQfnumber(res, "own"),
PQfnumber(res, "reffmncmorg"),
PQfnumber(res, "reffmncmaddress"),
PQfnumber(res, "ixvsex"),
PQfnumber(res, "tel"),
PQfnumber(res, "eml")
};
while (numread < numrow) {
rec = new FmncQPrsList();
ptr = PQgetvalue(res, numread, fnum[0]); rec->qref = atoll(ptr);
ptr = PQgetvalue(res, numread, fnum[1]); rec->jref = atoll(ptr);
ptr = PQgetvalue(res, numread, fnum[2]); rec->jnum = atol(ptr);
ptr = PQgetvalue(res, numread, fnum[3]); rec->ref = atoll(ptr);
ptr = PQgetvalue(res, numread, fnum[4]); rec->Title.assign(ptr, PQgetlength(res, numread, fnum[4]));
ptr = PQgetvalue(res, numread, fnum[5]); rec->Firstname.assign(ptr, PQgetlength(res, numread, fnum[5]));
ptr = PQgetvalue(res, numread, fnum[6]); rec->Lastname.assign(ptr, PQgetlength(res, numread, fnum[6]));
ptr = PQgetvalue(res, numread, fnum[7]); rec->grp = atoll(ptr);
ptr = PQgetvalue(res, numread, fnum[8]); rec->own = atoll(ptr);
ptr = PQgetvalue(res, numread, fnum[9]); rec->refFmncMOrg = atoll(ptr);
ptr = PQgetvalue(res, numread, fnum[10]); rec->refFmncMAddress = atoll(ptr);
ptr = PQgetvalue(res, numread, fnum[11]); rec->ixVSex = atol(ptr);
ptr = PQgetvalue(res, numread, fnum[12]); rec->Tel.assign(ptr, PQgetlength(res, numread, fnum[12]));
ptr = PQgetvalue(res, numread, fnum[13]); rec->Eml.assign(ptr, PQgetlength(res, numread, fnum[13]));
rst.nodes.push_back(rec);
numread++;
};
};
PQclear(res);
return numread;
};
Developer ID: epsitech, Project: fabmaniac, Lines: 60, Source file: FmncQPrsList.cpp
Example 12: printCrosstab
/*
* Output the pivoted resultset with the printTable* functions. Return true
* if successful, false otherwise.
*/
static bool
printCrosstab(const PGresult *results,
int num_columns, pivot_field *piv_columns, int field_for_columns,
int num_rows, pivot_field *piv_rows, int field_for_rows,
int field_for_data)
{
printQueryOpt popt = pset.popt;
printTableContent cont;
int i,
rn;
char col_align;
int *horiz_map;
bool retval = false;
printTableInit(&cont, &popt.topt, popt.title, num_columns + 1, num_rows);
/* Step 1: set target column names (horizontal header) */
/* The name of the first column is kept unchanged by the pivoting */
printTableAddHeader(&cont,
PQfname(results, field_for_rows),
false,
column_type_alignment(PQftype(results,
field_for_rows)));
/*
* To iterate over piv_columns[] by piv_columns[].rank, create a reverse
* map associating each piv_columns[].rank to its index in piv_columns.
* This avoids an O(N^2) loop later.
*/
horiz_map = (int *) pg_malloc(sizeof(int) * num_columns);
for (i = 0; i < num_columns; i++)
horiz_map[piv_columns[i].rank] = i;
/*
* The display alignment depends on its PQftype().
*/
col_align = column_type_alignment(PQftype(results, field_for_data));
for (i = 0; i < num_columns; i++)
{
char *colname;
colname = piv_columns[horiz_map[i]].name ?
piv_columns[horiz_map[i]].name :
(popt.nullPrint ? popt.nullPrint : "");
printTableAddHeader(&cont, colname, false, col_align);
}
pg_free(horiz_map);
/* Step 2: set row names in the first output column (vertical header) */
for (i = 0; i < num_rows; i++)
{
int k = piv_rows[i].rank;
cont.cells[k * (num_columns + 1)] = piv_rows[i].name ?
piv_rows[i].name :
(popt.nullPrint ? popt.nullPrint : "");
}
cont.cellsadded = num_rows * (num_columns + 1);
/*
* Step 3: fill in the content cells.
*/
for (rn = 0; rn < PQntuples(results); rn++)
{
int row_number;
int col_number;
pivot_field *p;
pivot_field elt;
/* Find target row */
if (!PQgetisnull(results, rn, field_for_rows))
elt.name = PQgetvalue(results, rn, field_for_rows);
else
elt.name = NULL;
p = (pivot_field *) bsearch(&elt,
piv_rows,
num_rows,
sizeof(pivot_field),
pivotFieldCompare);
Assert(p != NULL);
row_number = p->rank;
/* Find target column */
if (!PQgetisnull(results, rn, field_for_columns))
elt.name = PQgetvalue(results, rn, field_for_columns);
else
elt.name = NULL;
p = (pivot_field *) bsearch(&elt,
piv_columns,
num_columns,
sizeof(pivot_field),
pivotFieldCompare);
//......... part of the code omitted here .........
Developer ID: MohammadHabbab, Project: postgres, Lines: 101, Source file: crosstabview.c
Example 13: PrintResultsInCrosstab
//......... part of the code omitted here .........
for (i = 0; i < PQnfields(res); i++)
{
if (i != field_for_rows && i != field_for_columns)
{
field_for_data = i;
break;
}
}
Assert(field_for_data >= 0);
}
else
{
int num_fields;
/* If a field was given, find out what it is. Only one is allowed. */
num_fields = parseColumnRefs(opt_field_for_data, res, &colsD, 1, ',');
if (num_fields < 1)
goto error_return;
field_for_data = colsD[0];
}
/*
* First part: accumulate the names that go into the vertical and
* horizontal headers, each into an AVL binary tree to build the set of
* DISTINCT values.
*/
for (rn = 0; rn < PQntuples(res); rn++)
{
char *val;
char *val1;
/* horizontal */
val = PQgetisnull(res, rn, field_for_columns) ? NULL :
PQgetvalue(res, rn, field_for_columns);
val1 = NULL;
if (sort_field_for_columns >= 0 &&
!PQgetisnull(res, rn, sort_field_for_columns))
val1 = PQgetvalue(res, rn, sort_field_for_columns);
avlMergeValue(&piv_columns, val, val1);
if (piv_columns.count > CROSSTABVIEW_MAX_COLUMNS)
{
psql_error(_("Maximum number of columns (%d) exceeded\n"),
CROSSTABVIEW_MAX_COLUMNS);
goto error_return;
}
/* vertical */
val = PQgetisnull(res, rn, field_for_rows) ? NULL :
PQgetvalue(res, rn, field_for_rows);
avlMergeValue(&piv_rows, val, NULL);
}
/*
* Second part: Generate sorted arrays from the AVL trees.
*/
num_columns = piv_columns.count;
num_rows = piv_rows.count;
array_columns = (pivot_field *)
pg_malloc(sizeof(pivot_field) * num_columns);
array_rows = (pivot_field *)
pg_malloc(sizeof(pivot_field) * num_rows);
avlCollectFields(&piv_columns, piv_columns.root, array_columns, 0);
avlCollectFields(&piv_rows, piv_rows.root, array_rows, 0);
/*
* Third part: optionally, process the ranking data for the horizontal
* header
*/
if (sort_field_for_columns >= 0)
rankSort(num_columns, array_columns);
/*
* Fourth part: print the crosstab'ed results.
*/
retval = printCrosstab(res,
num_columns, array_columns, field_for_columns,
num_rows, array_rows, field_for_rows,
field_for_data);
error_return:
avlFree(&piv_columns, piv_columns.root);
avlFree(&piv_rows, piv_rows.root);
pg_free(array_columns);
pg_free(array_rows);
pg_free(colsV);
pg_free(colsH);
pg_free(colsD);
return retval;
}
Developer ID: MohammadHabbab, Project: postgres, Lines: 101, Source file: crosstabview.c
Example 14: do_failover
static void
do_failover(void)
{
PGresult *res1;
PGresult *res2;
char sqlquery[8192];
int total_nodes = 0;
int visible_nodes = 0;
bool find_best = false;
int i;
int r;
int node;
char nodeConninfo[MAXLEN];
unsigned int uxlogid;
unsigned int uxrecoff;
char last_wal_standby_applied[MAXLEN];
PGconn *nodeConn = NULL;
/*
* we will get info for up to 50 nodes,
* which seems to be large enough for most scenarios
*/
nodeInfo nodes[50];
nodeInfo best_candidate;
/* first we get info about this node, and update shared memory */
sprintf(sqlquery, "SELECT pg_last_xlog_replay_location()");
res1 = PQexec(myLocalConn, sqlquery);
if (PQresultStatus(res1) != PGRES_TUPLES_OK)
{
log_err(_("PQexec failed: %s.\nReport an invalid value to not be considered as new primary and exit.\n"), PQerrorMessage(myLocalConn));
PQclear(res1);
sprintf(last_wal_standby_applied, "'%X/%X'", 0, 0);
update_shared_memory(last_wal_standby_applied);
exit(ERR_DB_QUERY);
}
/* write last location in shared memory */
update_shared_memory(PQgetvalue(res1, 0, 0));
/*
* we sleep for the monitor interval plus one second;
* that should be enough for the other repmgrd instances to update their own data
*/
sleep(SLEEP_MONITOR + 1);
/* get a list of standby nodes, including myself */
sprintf(sqlquery, "SELECT id, conninfo "
" FROM %s.repl_nodes "
" WHERE id IN (SELECT standby_node FROM %s.repl_status) "
" AND cluster = '%s' "
" ORDER BY priority ",
repmgr_schema, repmgr_schema, local_options.cluster_name);
res1 = PQexec(myLocalConn, sqlquery);
if (PQresultStatus(res1) != PGRES_TUPLES_OK)
{
log_err(_("Can't get nodes info: %s\n"), PQerrorMessage(myLocalConn));
PQclear(res1);
PQfinish(myLocalConn);
exit(ERR_DB_QUERY);
}
/* ask for the locations */
for (i = 0; i < PQntuples(res1); i++)
{
node = atoi(PQgetvalue(res1, i, 0));
/* Initialize to false so that if we can't reach this node we know it later */
nodes[i].is_ready = false;
strncpy(nodeConninfo, PQgetvalue(res1, i, 1), MAXLEN);
nodeConn = establishDBConnection(nodeConninfo, false);
/* if we can't see the node just skip it */
if (PQstatus(nodeConn) != CONNECTION_OK)
continue;
sqlquery_snprintf(sqlquery, "SELECT repmgr_get_last_standby_location()");
res2 = PQexec(nodeConn, sqlquery);
if (PQresultStatus(res2) != PGRES_TUPLES_OK)
{
log_info(_("Can't get node's last standby location: %s\n"), PQerrorMessage(nodeConn));
log_info(_("Connection details: %s\n"), nodeConninfo);
PQclear(res2);
PQfinish(nodeConn);
continue;
}
visible_nodes++;
if (sscanf(PQgetvalue(res2, 0, 0), "%X/%X", &uxlogid, &uxrecoff) != 2)
log_info(_("could not parse transaction log location \"%s\"\n"), PQgetvalue(res2, 0, 0));
nodes[i].nodeId = node;
nodes[i].xlog_location.xlogid = uxlogid;
nodes[i].xlog_location.xrecoff = uxrecoff;
nodes[i].is_ready = true;
//......... part of the code omitted here .........
Developer ID: klando, Project: repmgr, Lines: 101, Source file: repmgrd.c
Example 15: ReceiveAndUnpackTarFile
/*
* Receive a tar format stream from the connection to the server, and unpack
* the contents of it into a directory. Only files, directories and
* symlinks are supported, no other kinds of special files.
*
* If the data is for the main data directory, it will be restored in the
* specified directory. If it's for another tablespace, it will be restored
* in the original directory, since relocation of tablespaces is not
* supported.
*/
static void
ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
{
char current_path[MAXPGPATH];
char filename[MAXPGPATH];
int current_len_left;
int current_padding = 0;
char *copybuf = NULL;
FILE *file = NULL;
if (PQgetisnull(res, rownum, 0))
strcpy(current_path, basedir);
else
strcpy(current_path, PQgetvalue(res, rownum, 1));
/*
* Get the COPY data
*/
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_COPY_OUT)
{
fprintf(stderr, _("%s: could not get COPY data stream: %s"),
progname, PQerrorMessage(conn));
disconnect_and_exit(1);
}
while (1)
{
int r;
if (copybuf != NULL)
{
PQfreemem(copybuf);
copybuf = NULL;
}
r = PQgetCopyData(conn, &copybuf, 0);
if (r == -1)
{
/*
* End of chunk
*/
if (file)
fclose(file);
break;
}
else if (r == -2)
{
fprintf(stderr, _("%s: could not read COPY data: %s"),
progname, PQerrorMessage(conn));
disconnect_and_exit(1);
}
if (file == NULL)
{
int filemode;
/*
* No current file, so this must be the header for a new file
*/
if (r != 512)
{
fprintf(stderr, _("%s: invalid tar block header size: %d\n"),
progname, r);
disconnect_and_exit(1);
}
totaldone += 512;
if (sscanf(copybuf + 124, "%11o", &current_len_left) != 1)
{
fprintf(stderr, _("%s: could not parse file size\n"),
progname);
disconnect_and_exit(1);
}
/* Set permissions on the file */
if (sscanf(&copybuf[100], "%07o ", &filemode) != 1)
{
fprintf(stderr, _("%s: could not parse file mode\n"),
progname);
disconnect_and_exit(1);
}
/*
* All files are padded up to 512 bytes
*/
current_padding =
((current_len_left + 511) & ~511) - current_len_left;
//......... part of the code omitted here .........
Developer ID: adunstan, Project: postgresql-dev, Lines: 101, Source file: pg_basebackup.c