/*
* Check expected (query runtime) tupdesc suitable for Connectby
*/
static void
validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial)
{
int serial_column = 0;
if (show_serial)
serial_column = 1;
/* do we have the correct number of columns? */
if (show_branch)
{
if (tupdesc->natts != (CONNECTBY_NCOLS + serial_column))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid return type"),
errdetail("Query-specified return tuple has " \
"wrong number of columns.")));
}
else
{
if (tupdesc->natts != CONNECTBY_NCOLS_NOBRANCH + serial_column)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid return type"),
errdetail("Query-specified return tuple has " \
"wrong number of columns.")));
}
/* check that the types of the first two columns match */
if (tupdesc->attrs[0]->atttypid != tupdesc->attrs[1]->atttypid)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid return type"),
errdetail("First two columns must be the same type.")));
/* check that the type of the third column is INT4 */
if (tupdesc->attrs[2]->atttypid != INT4OID)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid return type"),
errdetail("Third column must be type %s.",
format_type_be(INT4OID))));
/* check that the type of the fourth column is TEXT if applicable */
if (show_branch && tupdesc->attrs[3]->atttypid != TEXTOID)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid return type"),
errdetail("Fourth column must be type %s.",
format_type_be(TEXTOID))));
/* check that the type of the fifth column is INT4 */
if (show_branch && show_serial && tupdesc->attrs[4]->atttypid != INT4OID)
elog(ERROR, "query-specified return tuple not valid for Connectby: "
"fifth column must be type %s", format_type_be(INT4OID));
/* check that the type of the fourth column is INT4 */
if (!show_branch && show_serial && tupdesc->attrs[3]->atttypid != INT4OID)
elog(ERROR, "query-specified return tuple not valid for Connectby: "
"fourth column must be type %s", format_type_be(INT4OID));
/* OK, the tupdesc is valid for our purposes */
}
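A tuple description that passes this validator corresponds to a call like the following; this is a usage sketch based on the standard contrib/tablefunc signature, with hypothetical table and column names:

/*
 * Usage sketch (hypothetical names; assumes CONNECTBY_NCOLS is 4 and
 * CONNECTBY_NCOLS_NOBRANCH is 3, as in contrib/tablefunc):
 *
 *   SELECT * FROM connectby('tree', 'keyid', 'parent_keyid', 'row1', 0, '~')
 *     AS t(keyid text, parent_keyid text, level int, branch text);
 *
 * Dropping the branch_delim argument drops the branch column (3 columns);
 * the orderby variant appends a trailing serial int4 column.
 */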
Developer ID: GisKook, Project: Gis, Lines: 66, Source: tablefunc.c
Example 3: load_categories_hash
/*
* load up the categories hash table
*/
static HTAB *
load_categories_hash(char *cats_sql, MemoryContext per_query_ctx)
{
HTAB *crosstab_hash;
HASHCTL ctl;
int ret;
int proc;
MemoryContext SPIcontext;
/* initialize the category hash table */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = MAX_CATNAME_LEN;
ctl.entrysize = sizeof(crosstab_HashEnt);
ctl.hcxt = per_query_ctx;
/*
* use INIT_CATS, defined above as a guess of how many hash table entries
* to create, initially
*/
crosstab_hash = hash_create("crosstab hash",
INIT_CATS,
&ctl,
HASH_ELEM | HASH_CONTEXT);
/* Connect to SPI manager */
if ((ret = SPI_connect()) < 0)
/* internal error */
elog(ERROR, "load_categories_hash: SPI_connect returned %d", ret);
/* Retrieve the category name rows */
ret = SPI_execute(cats_sql, true, 0);
proc = SPI_processed;
/* Check for qualifying tuples */
if ((ret == SPI_OK_SELECT) && (proc > 0))
{
SPITupleTable *spi_tuptable = SPI_tuptable;
TupleDesc spi_tupdesc = spi_tuptable->tupdesc;
int i;
/*
* The provided categories SQL query must always return one column:
* category - the label or identifier for each column
*/
if (spi_tupdesc->natts != 1)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("provided \"categories\" SQL must " \
"return 1 column of at least one row")));
for (i = 0; i < proc; i++)
{
crosstab_cat_desc *catdesc;
char *catname;
HeapTuple spi_tuple;
/* get the next sql result tuple */
spi_tuple = spi_tuptable->vals[i];
/* get the category from the current sql result tuple */
catname = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
SPIcontext = MemoryContextSwitchTo(per_query_ctx);
catdesc = (crosstab_cat_desc *) palloc(sizeof(crosstab_cat_desc));
catdesc->catname = catname;
catdesc->attidx = i;
/* Add the proc description block to the hashtable */
crosstab_HashTableInsert(crosstab_hash, catdesc);
MemoryContextSwitchTo(SPIcontext);
}
}
if (SPI_finish() != SPI_OK_FINISH)
/* internal error */
elog(ERROR, "load_categories_hash: SPI_finish() failed");
return crosstab_hash;
}
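crosstab_HashTableInsert itself is not shown in this excerpt. Below is a plausible reconstruction, assuming the category name is the hash key; the field names are guesses, not the actual tablefunc.c definition:

/* Hypothetical sketch -- not the actual tablefunc.c macro */
#define crosstab_HashTableInsert(HASHTAB, CATDESC) \
do { \
	crosstab_HashEnt *hentry; \
	bool		found; \
	char		key[MAX_CATNAME_LEN]; \
\
	MemSet(key, 0, MAX_CATNAME_LEN); \
	snprintf(key, MAX_CATNAME_LEN, "%s", (CATDESC)->catname); \
	hentry = (crosstab_HashEnt *) hash_search(HASHTAB, (void *) key, \
											  HASH_ENTER, &found); \
	if (found) \
		ereport(ERROR, \
				(errcode(ERRCODE_DUPLICATE_OBJECT), \
				 errmsg("duplicate category name"))); \
	hentry->catdesc = (CATDESC); \
} while (0)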
Developer ID: GisKook, Project: Gis, Lines: 84, Source: tablefunc.c
Example 4: geography_as_geojson
Datum geography_as_geojson(PG_FUNCTION_ARGS)
{
LWGEOM *lwgeom = NULL;
GSERIALIZED *g = NULL;
char *geojson;
text *result;
int version;
int option = 0;
int has_bbox = 0;
int precision = OUT_MAX_DOUBLE_PRECISION;
char * srs = NULL;
/* Get the version */
version = PG_GETARG_INT32(0);
if ( version != 1)
{
elog(ERROR, "Only GeoJSON 1 is supported");
PG_RETURN_NULL();
}
/* Get the geography */
if (PG_ARGISNULL(1) ) PG_RETURN_NULL();
g = (GSERIALIZED*)PG_DETOAST_DATUM(PG_GETARG_DATUM(1));
/* Convert to lwgeom so we can run the old functions */
lwgeom = lwgeom_from_gserialized(g);
/* Retrieve precision if any (default is max) */
if (PG_NARGS() >2 && !PG_ARGISNULL(2))
{
precision = PG_GETARG_INT32(2);
if ( precision > OUT_MAX_DOUBLE_PRECISION )
precision = OUT_MAX_DOUBLE_PRECISION;
else if ( precision < 0 ) precision = 0;
}
/* Retrieve output option
* 0 = without option (default)
* 1 = bbox
* 2 = short crs
* 4 = long crs
*/
if (PG_NARGS() >3 && !PG_ARGISNULL(3))
option = PG_GETARG_INT32(3);
if (option & 2 || option & 4)
{
/* Geography only handles SRID_DEFAULT */
if (option & 2) srs = getSRSbySRID(SRID_DEFAULT, true);
if (option & 4) srs = getSRSbySRID(SRID_DEFAULT, false);
if (!srs)
{
elog(ERROR, "SRID SRID_DEFAULT unknown in spatial_ref_sys table");
PG_RETURN_NULL();
}
}
if (option & 1) has_bbox = 1;
geojson = lwgeom_to_geojson(lwgeom, srs, precision, has_bbox);
lwgeom_free(lwgeom);
PG_FREE_IF_COPY(g, 1);
if (srs) pfree(srs);
result = cstring2text(geojson);
lwfree(geojson);
PG_RETURN_TEXT_P(result);
}
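At the SQL level this C function backs the geography variant of ST_AsGeoJSON. A hedged usage sketch (the SQL wrapper normally supplies the leading version argument):

/*
 * Usage sketch (assumes the usual PostGIS wrapper around this C function):
 *
 *   SELECT ST_AsGeoJSON(geog);         -- max precision, no options
 *   SELECT ST_AsGeoJSON(geog, 5, 1);   -- 5 decimal digits, include bbox
 *   SELECT ST_AsGeoJSON(geog, 5, 2);   -- short CRS, e.g. "EPSG:4326"
 */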
/* ----------------------------------------------------------------
* BitmapHeapNext
*
* Retrieve next tuple from the BitmapHeapScan node's currentRelation
* ----------------------------------------------------------------
*/
static TupleTableSlot *
BitmapHeapNext(BitmapHeapScanState *node)
{
ExprContext *econtext;
HeapScanDesc scan;
TIDBitmap *tbm;
TBMIterator *tbmiterator;
TBMIterateResult *tbmres;
#ifdef USE_PREFETCH
TBMIterator *prefetch_iterator;
#endif
OffsetNumber targoffset;
TupleTableSlot *slot;
/*
* extract necessary information from index scan node
*/
econtext = node->ss.ps.ps_ExprContext;
slot = node->ss.ss_ScanTupleSlot;
scan = node->ss.ss_currentScanDesc;
tbm = node->tbm;
tbmiterator = node->tbmiterator;
tbmres = node->tbmres;
#ifdef USE_PREFETCH
prefetch_iterator = node->prefetch_iterator;
#endif
/*
* If we haven't yet performed the underlying index scan, do it, and begin
* the iteration over the bitmap.
*
* For prefetching, we use *two* iterators, one for the pages we are
* actually scanning and another that runs ahead of the first for
* prefetching. node->prefetch_pages tracks exactly how many pages ahead
* the prefetch iterator is. Also, node->prefetch_target tracks the
* desired prefetch distance, which starts small and increases up to the
* GUC-controlled maximum, target_prefetch_pages. This is to avoid doing
* a lot of prefetching in a scan that stops after a few tuples because of
* a LIMIT.
*/
if (tbm == NULL)
{
tbm = (TIDBitmap *) MultiExecProcNode(outerPlanState(node));
if (!tbm || !IsA(tbm, TIDBitmap))
elog(ERROR, "unrecognized result from subplan");
node->tbm = tbm;
node->tbmiterator = tbmiterator = tbm_begin_iterate(tbm);
node->tbmres = tbmres = NULL;
#ifdef USE_PREFETCH
if (target_prefetch_pages > 0)
{
node->prefetch_iterator = prefetch_iterator = tbm_begin_iterate(tbm);
node->prefetch_pages = 0;
node->prefetch_target = -1;
}
#endif /* USE_PREFETCH */
}
for (;;)
{
Page dp;
ItemId lp;
/*
* Get next page of results if needed
*/
if (tbmres == NULL)
{
node->tbmres = tbmres = tbm_iterate(tbmiterator);
if (tbmres == NULL)
{
/* no more entries in the bitmap */
break;
}
#ifdef USE_PREFETCH
if (node->prefetch_pages > 0)
{
/* The main iterator has closed the distance by one page */
node->prefetch_pages--;
}
else if (prefetch_iterator)
{
/* Do not let the prefetch iterator get behind the main one */
TBMIterateResult *tbmpre = tbm_iterate(prefetch_iterator);
if (tbmpre == NULL || tbmpre->blockno != tbmres->blockno)
elog(ERROR, "prefetch and main iterators are out of sync");
}
#endif /* USE_PREFETCH */
//......... (part of the code omitted) .........
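The header comment above says prefetch_target "starts small and increases up to the GUC-controlled maximum". The elided remainder of the loop presumably ramps it along these lines; this is a sketch inferred from that comment, not a verbatim copy of the omitted code:

#ifdef USE_PREFETCH
		/* Sketch: grow the prefetch distance gradually (assumed logic) */
		if (node->prefetch_target >= target_prefetch_pages)
			 /* don't increase any further */ ;
		else if (node->prefetch_target >= target_prefetch_pages / 2)
			node->prefetch_target = target_prefetch_pages;
		else if (node->prefetch_target > 0)
			node->prefetch_target *= 2;
		else
			node->prefetch_target++;
#endif   /* USE_PREFETCH */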
Datum
check_foreign_key(PG_FUNCTION_ARGS)
{
TriggerData *trigdata = (TriggerData *) fcinfo->context;
Trigger *trigger; /* to get trigger name */
int nargs; /* # of args specified in CREATE TRIGGER */
char **args; /* arguments: as described above */
char **args_temp;
int nrefs; /* number of references (== # of plans) */
char action; /* 'R'estrict | 'S'etnull | 'C'ascade */
int nkeys; /* # of key columns */
Datum *kvals; /* key values */
char *relname; /* referencing relation name */
Relation rel; /* triggered relation */
HeapTuple trigtuple = NULL; /* tuple being changed */
HeapTuple newtuple = NULL; /* tuple to return */
TupleDesc tupdesc; /* tuple description */
EPlan *plan; /* prepared plan(s) */
Oid *argtypes = NULL; /* key types to prepare execution plan */
bool isnull; /* to know is some column NULL or not */
bool isequal = true; /* are keys in both tuples equal (in UPDATE) */
char ident[2 * NAMEDATALEN]; /* to identify myself */
int is_update = 0;
int ret;
int i,
r;
#ifdef DEBUG_QUERY
elog(DEBUG4, "check_foreign_key: Enter Function");
#endif
/*
* Some checks first...
*/
/* Called by trigger manager ? */
if (!CALLED_AS_TRIGGER(fcinfo))
/* internal error */
elog(ERROR, "check_foreign_key: not fired by trigger manager");
/* Should be called for ROW trigger */
if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
/* internal error */
elog(ERROR, "check_foreign_key: must be fired for row");
/* Should not be called for INSERT */
if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
/* internal error */
elog(ERROR, "check_foreign_key: cannot process INSERT events");
/* Have to check tg_trigtuple - tuple being deleted */
trigtuple = trigdata->tg_trigtuple;
/*
* But if this is UPDATE then we have to return tg_newtuple. Also, if key
* in tg_newtuple is the same as in tg_trigtuple then nothing to do.
*/
is_update = 0;
if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
{
newtuple = trigdata->tg_newtuple;
is_update = 1;
}
trigger = trigdata->tg_trigger;
nargs = trigger->tgnargs;
args = trigger->tgargs;
if (nargs < 5) /* nrefs, action, key, Relation, key - at
* least */
/* internal error */
elog(ERROR, "check_foreign_key: too short %d (< 5) list of arguments", nargs);
nrefs = pg_atoi(args[0], sizeof(int), 0);
if (nrefs < 1)
/* internal error */
elog(ERROR, "check_foreign_key: %d (< 1) number of references specified", nrefs);
action = tolower((unsigned char) *(args[1]));
if (action != 'r' && action != 'c' && action != 's')
/* internal error */
elog(ERROR, "check_foreign_key: invalid action %s", args[1]);
nargs -= 2;
args += 2;
nkeys = (nargs - nrefs) / (nrefs + 1);
if (nkeys <= 0 || nargs != (nrefs + nkeys * (nrefs + 1)))
/* internal error */
elog(ERROR, "check_foreign_key: invalid number of arguments %d for %d references",
nargs + 2, nrefs);
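/*
 * Worked example of the argument layout (illustration only): with
 * nrefs = 1 and nkeys = 2, CREATE TRIGGER would pass 7 arguments:
 *   '1', 'cascade', key1, key2, refrelname, refkey1, refkey2
 * After the "nargs -= 2" above, nargs = 5 = nrefs + nkeys * (nrefs + 1),
 * so the check above passes.
 */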
rel = trigdata->tg_relation;
tupdesc = rel->rd_att;
/* Connect to SPI manager */
if ((ret = SPI_connect()) < 0)
/* internal error */
elog(ERROR, "check_foreign_key: SPI_connect returned %d", ret);
/*
* We use SPI plan preparation feature, so allocate space to place key
* values.
*/
//......... (part of the code omitted) .........
Developer ID: Tao-Ma, Project: postgres, Lines: 101, Source: refint.c
Example 7: geography_as_gml
Datum geography_as_gml(PG_FUNCTION_ARGS)
{
LWGEOM *lwgeom = NULL;
GSERIALIZED *g = NULL;
char *gml;
text *result;
int version;
char *srs;
int srid = SRID_DEFAULT;
int precision = OUT_MAX_DOUBLE_PRECISION;
int option=0;
int lwopts = LW_GML_IS_DIMS;
static const char *default_prefix = "gml:";
char *prefixbuf;
const char* prefix = default_prefix;
text *prefix_text;
/* Get the version */
version = PG_GETARG_INT32(0);
if ( version != 2 && version != 3 )
{
elog(ERROR, "Only GML 2 and GML 3 are supported");
PG_RETURN_NULL();
}
/* Get the geography */
if ( PG_ARGISNULL(1) ) PG_RETURN_NULL();
g = (GSERIALIZED*)PG_DETOAST_DATUM(PG_GETARG_DATUM(1));
/* Convert to lwgeom so we can run the old functions */
lwgeom = lwgeom_from_gserialized(g);
/* Retrieve precision if any (default is max) */
if (PG_NARGS() >2 && !PG_ARGISNULL(2))
{
precision = PG_GETARG_INT32(2);
if ( precision > OUT_MAX_DOUBLE_PRECISION )
precision = OUT_MAX_DOUBLE_PRECISION;
else if ( precision < 0 ) precision = 0;
}
/* retrieve option */
if (PG_NARGS() >3 && !PG_ARGISNULL(3))
option = PG_GETARG_INT32(3);
/* retrieve prefix */
if (PG_NARGS() >4 && !PG_ARGISNULL(4))
{
prefix_text = PG_GETARG_TEXT_P(4);
if ( VARSIZE(prefix_text)-VARHDRSZ == 0 )
{
prefix = "";
}
else
{
/* +2 is one for the ':' and one for term null */
prefixbuf = palloc(VARSIZE(prefix_text)-VARHDRSZ+2);
memcpy(prefixbuf, VARDATA(prefix_text),
VARSIZE(prefix_text)-VARHDRSZ);
/* add colon and null terminate */
prefixbuf[VARSIZE(prefix_text)-VARHDRSZ] = ':';
prefixbuf[VARSIZE(prefix_text)-VARHDRSZ+1] = '\0';
prefix = prefixbuf;
}
}
if (option & 1) srs = getSRSbySRID(srid, false);
else srs = getSRSbySRID(srid, true);
if (!srs)
{
elog(ERROR, "SRID %d unknown in spatial_ref_sys table", SRID_DEFAULT);
PG_RETURN_NULL();
}
/* Revert lat/lon only with long SRS */
if (option & 1) lwopts |= LW_GML_IS_DEGREE;
if (option & 2) lwopts &= ~LW_GML_IS_DIMS;
if (version == 2)
gml = lwgeom_to_gml2(lwgeom, srs, precision, prefix);
else
gml = lwgeom_to_gml3(lwgeom, srs, precision, lwopts, prefix);
lwgeom_free(lwgeom);
PG_FREE_IF_COPY(g, 1);
result = cstring2text(gml);
lwfree(gml);
PG_RETURN_TEXT_P(result);
}
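As with the GeoJSON case, this is the C function behind the geography variant of ST_AsGML. A hedged usage sketch:

/*
 * Usage sketch (assumes the usual PostGIS wrapper supplies the version):
 *
 *   SELECT ST_AsGML(3, geog);                  -- GML 3, default precision
 *   SELECT ST_AsGML(3, geog, 5, 1);            -- long CRS, degree axis order
 *   SELECT ST_AsGML(2, geog, 15, 0, 'myns');   -- GML 2, custom prefix
 */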
/*
* CheckMyDatabase -- fetch information from the pg_database entry for our DB
*/
static void
CheckMyDatabase(const char *name, bool am_superuser)
{
HeapTuple tup;
Form_pg_database dbform;
char *collate;
char *ctype;
/* Fetch our pg_database row normally, via syscache */
tup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId));
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for database %u", MyDatabaseId);
dbform = (Form_pg_database) GETSTRUCT(tup);
/* This recheck is strictly paranoia */
if (strcmp(name, NameStr(dbform->datname)) != 0)
ereport(FATAL,
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" has disappeared from pg_database",
name),
errdetail("Database OID %u now seems to belong to \"%s\".",
MyDatabaseId, NameStr(dbform->datname))));
/*
* Check permissions to connect to the database.
*
* These checks are not enforced when in standalone mode, so that there is
* a way to recover from disabling all access to all databases, for
* example "UPDATE pg_database SET datallowconn = false;".
*
* We do not enforce them for autovacuum worker processes either.
*/
if (IsUnderPostmaster && !IsAutoVacuumWorkerProcess())
{
/*
* Check that the database is currently allowing connections.
*/
if (!dbform->datallowconn)
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("database \"%s\" is not currently accepting connections",
name)));
/*
* Check privilege to connect to the database. (The am_superuser test
* is redundant, but since we have the flag, might as well check it
* and save a few cycles.)
*/
if (!am_superuser &&
pg_database_aclcheck(MyDatabaseId, GetUserId(),
ACL_CONNECT) != ACLCHECK_OK)
ereport(FATAL,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied for database \"%s\"", name),
errdetail("User does not have CONNECT privilege.")));
/*
* Check connection limit for this database.
*
* There is a race condition here --- we create our PGPROC before
* checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
* just document that the connection limit is approximate.
*/
if (dbform->datconnlimit >= 0 &&
!am_superuser &&
CountDBBackends(MyDatabaseId) > dbform->datconnlimit)
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
errmsg("too many connections for database \"%s\"",
name)));
}
/*
* OK, we're golden. Next to-do item is to save the encoding info out of
* the pg_database tuple.
*/
SetDatabaseEncoding(dbform->encoding);
/* Record it as a GUC internal option, too */
SetConfigOption("server_encoding", GetDatabaseEncodingName(),
PGC_INTERNAL, PGC_S_OVERRIDE);
/* If we have no other source of client_encoding, use server encoding */
SetConfigOption("client_encoding", GetDatabaseEncodingName(),
PGC_BACKEND, PGC_S_DYNAMIC_DEFAULT);
/* assign locale variables */
collate = NameStr(dbform->datcollate);
ctype = NameStr(dbform->datctype);
if (pg_perm_setlocale(LC_COLLATE, collate) == NULL)
ereport(FATAL,
(errmsg("database locale is incompatible with operating system"),
errdetail("The database was initialized with LC_COLLATE \"%s\", "
" which is not recognized by setlocale().", collate),
errhint("Recreate the database with another locale or install the missing locale.")));
//......... (part of the code omitted) .........
/* --------------------------------
* InitPostgres
* Initialize POSTGRES.
*
* The database can be specified by name, using the in_dbname parameter, or by
* OID, using the dboid parameter. In the latter case, the actual database
* name can be returned to the caller in out_dbname. If out_dbname isn't
* NULL, it must point to a buffer of size NAMEDATALEN.
*
* In bootstrap mode no parameters are used. The autovacuum launcher process
* doesn't use any parameters either, because it only goes far enough to be
* able to read pg_database; it doesn't connect to any particular database.
* In walsender mode only username is used.
*
* As of PostgreSQL 8.2, we expect InitProcess() was already called, so we
* already have a PGPROC struct ... but it's not completely filled in yet.
*
* Note:
* Be very careful with the order of calls in the InitPostgres function.
* --------------------------------
*/
void
InitPostgres(const char *in_dbname, Oid dboid, const char *username,
char *out_dbname)
{
bool bootstrap = IsBootstrapProcessingMode();
bool am_superuser;
char *fullpath;
char dbname[NAMEDATALEN];
elog(DEBUG3, "InitPostgres");
/*
* Add my PGPROC struct to the ProcArray.
*
* Once I have done this, I am visible to other backends!
*/
InitProcessPhase2();
/*
* Initialize my entry in the shared-invalidation manager's array of
* per-backend data.
*
* Sets up MyBackendId, a unique backend identifier.
*/
MyBackendId = InvalidBackendId;
SharedInvalBackendInit(false);
if (MyBackendId > MaxBackends || MyBackendId <= 0)
elog(FATAL, "bad backend ID: %d", MyBackendId);
/* Now that we have a BackendId, we can participate in ProcSignal */
ProcSignalInit(MyBackendId);
/*
* Also set up timeout handlers needed for backend operation. We need
* these in every case except bootstrap.
*/
if (!bootstrap)
{
RegisterTimeout(DEADLOCK_TIMEOUT, CheckDeadLock);
RegisterTimeout(STATEMENT_TIMEOUT, StatementTimeoutHandler);
RegisterTimeout(LOCK_TIMEOUT, LockTimeoutHandler);
}
/*
* bufmgr needs another initialization call too
*/
InitBufferPoolBackend();
/*
* Initialize local process's access to XLOG.
*/
if (IsUnderPostmaster)
{
/*
* The postmaster already started the XLOG machinery, but we need to
* call InitXLOGAccess(), if the system isn't in hot-standby mode.
* This is handled by calling RecoveryInProgress and ignoring the
* result.
*/
(void) RecoveryInProgress();
}
else
{
/*
* We are either a bootstrap process or a standalone backend. Either
* way, start up the XLOG machinery, and register to have it closed
* down at exit.
*/
StartupXLOG();
on_shmem_exit(ShutdownXLOG, 0);
}
/*
* Initialize the relation cache and the system catalog caches. Note that
* no catalog access happens here; we only set up the hashtable structure.
* We must do this before starting a transaction because transaction abort
* would try to touch these hashtables.
//......... (part of the code omitted) .........
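For orientation, a regular backend typically reaches this function via a call of the following shape (a sketch; the exact call site varies across versions):

/*
 * Sketch of a typical invocation (assumption):
 *
 *     InitPostgres(dbname, InvalidOid, username, NULL);
 *
 * i.e. connect by name, so no database OID is given and no out_dbname
 * buffer is needed.
 */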
static void
worker_spi_main(Datum main_arg)
{
/* Register functions for SIGTERM/SIGHUP management */
pqsignal(SIGHUP, worker_spi_sighup);
pqsignal(SIGTERM, worker_spi_sigterm);
/* We're now ready to receive signals */
BackgroundWorkerUnblockSignals();
/* Connect to our database */
BackgroundWorkerInitializeConnection("postgres", NULL);
while (!got_sigterm)
{
int ret;
int rc;
StringInfoData buf;
/*
* Background workers mustn't call usleep() or any direct equivalent:
* instead, they may wait on their process latch, which sleeps as
* necessary, but is awakened if postmaster dies. That way the
* background process goes away immediately in an emergency.
*/
rc = WaitLatch(&MyProc->procLatch,
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
1000L);
ResetLatch(&MyProc->procLatch);
/* emergency bailout if postmaster has died */
if (rc & WL_POSTMASTER_DEATH)
proc_exit(1);
StartTransactionCommand();
SPI_connect();
PushActiveSnapshot(GetTransactionSnapshot());
initStringInfo(&buf);
/* Build the query string */
appendStringInfo(&buf,
"SELECT count(*) FROM pg_class;");
ret = SPI_execute(buf.data, true, 0);
/* Some error messages in case of incorrect handling */
if (ret != SPI_OK_SELECT)
elog(FATAL, "SPI_execute failed: error code %d", ret);
if (SPI_processed > 0)
{
int32 count;
bool isnull;
count = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0],
SPI_tuptable->tupdesc,
1, &isnull));
elog(LOG, "Currently %d relations in database",
count);
}
SPI_finish();
PopActiveSnapshot();
CommitTransactionCommand();
}
proc_exit(0);
}
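worker_spi_main only runs once the worker is registered. A minimal registration sketch using the background-worker API of the same vintage as this code; the worker name and restart policy are placeholder choices:

/* Hedged sketch: register worker_spi_main at library load time */
void
_PG_init(void)
{
	BackgroundWorker worker;

	MemSet(&worker, 0, sizeof(worker));
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
		BGWORKER_BACKEND_DATABASE_CONNECTION;
	worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
	worker.bgw_restart_time = BGW_NEVER_RESTART;	/* placeholder choice */
	worker.bgw_main = worker_spi_main;
	worker.bgw_main_arg = (Datum) 0;
	snprintf(worker.bgw_name, BGW_MAXLEN, "worker_spi");
	RegisterBackgroundWorker(&worker);
}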
/*
* Validator for an SP-GiST opclass.
*
* Some of the checks done here cover the whole opfamily, and therefore are
* redundant when checking each opclass in a family. But they don't run long
* enough to be much of a problem, so we accept the duplication rather than
* complicate the amvalidate API.
*/
bool
spgvalidate(Oid opclassoid)
{
bool result = true;
HeapTuple classtup;
Form_pg_opclass classform;
Oid opfamilyoid;
Oid opcintype;
char *opclassname;
HeapTuple familytup;
Form_pg_opfamily familyform;
char *opfamilyname;
CatCList *proclist,
*oprlist;
List *grouplist;
OpFamilyOpFuncGroup *opclassgroup;
int i;
ListCell *lc;
/* Fetch opclass information */
classtup = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclassoid));
if (!HeapTupleIsValid(classtup))
elog(ERROR, "cache lookup failed for operator class %u", opclassoid);
classform = (Form_pg_opclass) GETSTRUCT(classtup);
opfamilyoid = classform->opcfamily;
opcintype = classform->opcintype;
opclassname = NameStr(classform->opcname);
/* Fetch opfamily information */
familytup = SearchSysCache1(OPFAMILYOID, ObjectIdGetDatum(opfamilyoid));
if (!HeapTupleIsValid(familytup))
elog(ERROR, "cache lookup failed for operator family %u", opfamilyoid);
familyform = (Form_pg_opfamily) GETSTRUCT(familytup);
opfamilyname = NameStr(familyform->opfname);
/* Fetch all operators and support functions of the opfamily */
oprlist = SearchSysCacheList1(AMOPSTRATEGY, ObjectIdGetDatum(opfamilyoid));
proclist = SearchSysCacheList1(AMPROCNUM, ObjectIdGetDatum(opfamilyoid));
/* Check individual support functions */
for (i = 0; i < proclist->n_members; i++)
{
HeapTuple proctup = &proclist->members[i]->tuple;
Form_pg_amproc procform = (Form_pg_amproc) GETSTRUCT(proctup);
bool ok;
/*
* All SP-GiST support functions should be registered with matching
* left/right types
*/
if (procform->amproclefttype != procform->amprocrighttype)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("spgist operator family \"%s\" contains support procedure %s with cross-type registration",
opfamilyname,
format_procedure(procform->amproc))));
result = false;
}
/* Check procedure numbers and function signatures */
switch (procform->amprocnum)
{
case SPGIST_CONFIG_PROC:
case SPGIST_CHOOSE_PROC:
case SPGIST_PICKSPLIT_PROC:
case SPGIST_INNER_CONSISTENT_PROC:
ok = check_amproc_signature(procform->amproc, VOIDOID, true,
2, 2, INTERNALOID, INTERNALOID);
break;
case SPGIST_LEAF_CONSISTENT_PROC:
ok = check_amproc_signature(procform->amproc, BOOLOID, true,
2, 2, INTERNALOID, INTERNALOID);
break;
default:
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("spgist operator family \"%s\" contains function %s with invalid support number %d",
opfamilyname,
format_procedure(procform->amproc),
procform->amprocnum)));
result = false;
continue; /* don't want additional message */
}
if (!ok)
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("spgist operator family \"%s\" contains function %s with wrong signature for support number %d",
//......... (part of the code omitted) .........
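check_amproc_signature comes from the shared amvalidate support code. Its contract, sketched from how it is called above (an assumption, not a quotation of amvalidate.h):

/*
 * Assumed contract:
 *
 *   bool check_amproc_signature(Oid funcid, Oid restype, bool exact,
 *                               int minargs, int maxargs, ...);
 *
 * The trailing varargs list the expected argument types; "exact" demands
 * exact type matches rather than merely binary-coercible ones.
 */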
/* Process one per-dbspace directory for ResetUnloggedRelations */
static void
ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
{
DIR *dbspace_dir;
struct dirent *de;
char rm_path[MAXPGPATH * 2];
/* Caller must specify at least one operation. */
Assert((op & (UNLOGGED_RELATION_CLEANUP | UNLOGGED_RELATION_INIT)) != 0);
/*
* Cleanup is a two-pass operation. First, we go through and identify all
* the files with init forks. Then, we go through again and nuke
* everything with the same OID except the init fork.
*/
if ((op & UNLOGGED_RELATION_CLEANUP) != 0)
{
HTAB *hash = NULL;
HASHCTL ctl;
/* Open the directory. */
dbspace_dir = AllocateDir(dbspacedirname);
if (dbspace_dir == NULL)
{
elog(LOG,
"could not open dbspace directory \"%s\": %m",
dbspacedirname);
return;
}
/*
* It's possible that someone could create a ton of unlogged relations
* in the same database & tablespace, so we'd better use a hash table
* rather than an array or linked list to keep track of which files
* need to be reset. Otherwise, this cleanup operation would be
* O(n^2).
*/
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(unlogged_relation_entry);
ctl.entrysize = sizeof(unlogged_relation_entry);
hash = hash_create("unlogged hash", 32, &ctl, HASH_ELEM);
/* Scan the directory. */
while ((de = ReadDir(dbspace_dir, dbspacedirname)) != NULL)
{
ForkNumber forkNum;
int oidchars;
unlogged_relation_entry ent;
/* Skip anything that doesn't look like a relation data file. */
if (!parse_filename_for_nontemp_relation(de->d_name, &oidchars,
&forkNum))
continue;
/* Also skip it unless this is the init fork. */
if (forkNum != INIT_FORKNUM)
continue;
/*
* Put the OID portion of the name into the hash table, if it
* isn't already.
*/
memset(ent.oid, 0, sizeof(ent.oid));
memcpy(ent.oid, de->d_name, oidchars);
hash_search(hash, &ent, HASH_ENTER, NULL);
}
/* Done with the first pass. */
FreeDir(dbspace_dir);
/*
* If we didn't find any init forks, there's no point in continuing;
* we can bail out now.
*/
if (hash_get_num_entries(hash) == 0)
{
hash_destroy(hash);
return;
}
/*
* Now, make a second pass and remove anything that matches. First,
* reopen the directory.
*/
dbspace_dir = AllocateDir(dbspacedirname);
if (dbspace_dir == NULL)
{
elog(LOG,
"could not open dbspace directory \"%s\": %m",
dbspacedirname);
hash_destroy(hash);
return;
}
/* Scan the directory. */
while ((de = ReadDir(dbspace_dir, dbspacedirname)) != NULL)
{
ForkNumber forkNum;
int oidchars;
bool found;
//......... (part of the code omitted) .........
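The hash key above is the OID portion of the file name, zero-padded into a fixed-size buffer. A plausible sketch of the entry type, inferred from the memset/memcpy on ent.oid (the real definition lives in reinit.c):

/* Sketch; assumes OIDCHARS is the maximum number of digits in an OID */
typedef struct
{
	char		oid[OIDCHARS + 1];	/* NUL-terminated OID prefix of file name */
} unlogged_relation_entry;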
//......... (part of the code omitted) .........
case HEAPTUPLE_RECENTLY_DEAD:
recent_dead = true;
/*
* This tuple may soon become DEAD. Update the hint field so
* that the page is reconsidered for pruning in future.
*/
heap_prune_record_prunable(prstate,
HeapTupleHeaderGetXmax(htup));
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
/*
* This tuple may soon become DEAD. Update the hint field so
* that the page is reconsidered for pruning in future.
*/
heap_prune_record_prunable(prstate,
HeapTupleHeaderGetXmax(htup));
break;
case HEAPTUPLE_LIVE:
case HEAPTUPLE_INSERT_IN_PROGRESS:
/*
* If we wanted to optimize for aborts, we might consider
* marking the page prunable when we see INSERT_IN_PROGRESS.
* But we don't. See related decisions about when to mark the
* page prunable in heapam.c.
*/
break;
default:
elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
break;
}
/*
* Remember the last DEAD tuple seen. We will advance past
* RECENTLY_DEAD tuples just in case there's a DEAD one after them;
* but we can't advance past anything else. (XXX is it really worth
* continuing to scan beyond RECENTLY_DEAD? The case where we will
* find another DEAD tuple is a fairly unusual corner case.)
*/
if (tupdead)
{
latestdead = offnum;
HeapTupleHeaderAdvanceLatestRemovedXid(htup,
&prstate->latestRemovedXid);
}
else if (!recent_dead)
break;
/*
* If the tuple is not HOT-updated, then we are at the end of this
* HOT-update chain.
*/
if (!HeapTupleHeaderIsHotUpdated(htup))
break;
/*
* Advance to next chain member.
*/
Assert(ItemPointerGetBlockNumber(&htup->t_ctid) ==
BufferGetBlockNumber(buffer));
offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);