/*
* SQL function json_populate_record
*
* set fields in a record from the argument json
*
* Code adapted shamelessly from hstore's populate_record
* which is in turn partly adapted from record_out.
*
* The json is decomposed into a hash table, in which each
* field in the record is then looked up by name.
*/
Datum
json_populate_record(PG_FUNCTION_ARGS)
{
Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
text *json;
bool use_json_as_text;
HTAB *json_hash;
HeapTupleHeader rec;
Oid tupType;
int32 tupTypmod;
TupleDesc tupdesc;
HeapTupleData tuple;
HeapTuple rettuple;
RecordIOData *my_extra;
int ncolumns;
int i;
Datum *values;
bool *nulls;
char fname[NAMEDATALEN];
JsonHashEntry *hashentry;
use_json_as_text = PG_ARGISNULL(2) ? false : PG_GETARG_BOOL(2);
if (!type_is_rowtype(argtype))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("first argument must be a rowtype")));
if (PG_ARGISNULL(0))
{
if (PG_ARGISNULL(1))
PG_RETURN_NULL();
rec = NULL;
/*
* We have no tuple to look at, so the only source of type info is the
* argtype. The lookup_rowtype_tupdesc call below will error out if we
* don't have a known composite type oid here.
*/
tupType = argtype;
tupTypmod = -1;
}
else
{
rec = PG_GETARG_HEAPTUPLEHEADER(0);
if (PG_ARGISNULL(1))
PG_RETURN_POINTER(rec);
/* Extract type info from the tuple itself */
tupType = HeapTupleHeaderGetTypeId(rec);
tupTypmod = HeapTupleHeaderGetTypMod(rec);
}
json = PG_GETARG_TEXT_P(1);
json_hash = get_json_object_as_hash(json, "json_populate_record", use_json_as_text);
/*
* if the input json is empty, we can only skip the rest if we were passed
* in a non-null record, since otherwise there may be issues with domain
* nulls.
*/
if (hash_get_num_entries(json_hash) == 0 && rec)
PG_RETURN_POINTER(rec);
tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
ncolumns = tupdesc->natts;
if (rec)
{
/* Build a temporary HeapTuple control structure */
tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
ItemPointerSetInvalid(&(tuple.t_self));
tuple.t_tableOid = InvalidOid;
tuple.t_data = rec;
}
/*
* We arrange to look up the needed I/O info just once per series of
* calls, assuming the record type doesn't change underneath us.
*/
my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL ||
my_extra->ncolumns != ncolumns)
{
fcinfo->flinfo->fn_extra =
//......... part of the code omitted here .........
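/*
 * Illustrative sketch, not part of the original file: the fn_extra caching
 * idiom referenced in the comment above, reduced to a minimal form.  The
 * struct name MyCache and the helper get_my_cache() are invented for the
 * example (assuming the usual postgres.h/fmgr.h includes); the point is that
 * per-call state is allocated once in fn_mcxt, which lives as long as the
 * FmgrInfo, and is reused on later calls in the same query.  At the SQL level
 * the function above is typically invoked as something like
 * json_populate_record(null::some_rowtype, '{"a":1}').
 */
typedef struct MyCache
{
	Oid			cached_type;	/* composite type seen on the previous call */
	int32		cached_typmod;
} MyCache;

static MyCache *
get_my_cache(FunctionCallInfo fcinfo)
{
	MyCache    *cache = (MyCache *) fcinfo->flinfo->fn_extra;

	if (cache == NULL)
	{
		/* first call in this query: allocate the cache and remember it */
		cache = (MyCache *) MemoryContextAllocZero(fcinfo->flinfo->fn_mcxt,
												   sizeof(MyCache));
		cache->cached_type = InvalidOid;	/* force first-call setup */
		fcinfo->flinfo->fn_extra = cache;
	}
	return cache;
}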
/*
* _hash_first() -- Find the first item in a scan.
*
* Find the first item in the index that
* satisfies the qualification associated with the scan descriptor. On
* success, the page containing the current index tuple is read locked
* and pinned, and the scan's opaque data entry is updated to
* include the buffer.
*/
bool
_hash_first(IndexScanDesc scan, ScanDirection dir)
{
Relation rel = scan->indexRelation;
HashScanOpaque so = (HashScanOpaque) scan->opaque;
ScanKey cur;
uint32 hashkey;
Bucket bucket;
BlockNumber blkno;
BlockNumber oldblkno = InvalidBuffer;
bool retry = false;
Buffer buf;
Buffer metabuf;
Page page;
HashPageOpaque opaque;
HashMetaPage metap;
IndexTuple itup;
ItemPointer current;
OffsetNumber offnum;
pgstat_count_index_scan(rel);
current = &(so->hashso_curpos);
ItemPointerSetInvalid(current);
/*
* We do not support hash scans with no index qualification, because we
* would have to read the whole index rather than just one bucket. That
* creates a whole raft of problems, since we haven't got a practical way
* to lock all the buckets against splits or compactions.
*/
if (scan->numberOfKeys < 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("hash indexes do not support whole-index scans")));
/* There may be more than one index qual, but we hash only the first */
cur = &scan->keyData[0];
/* We support only single-column hash indexes */
Assert(cur->sk_attno == 1);
/* And there's only one operator strategy, too */
Assert(cur->sk_strategy == HTEqualStrategyNumber);
/*
* If the constant in the index qual is NULL, assume it cannot match any
* items in the index.
*/
if (cur->sk_flags & SK_ISNULL)
return false;
/*
* Okay to compute the hash key. We want to do this before acquiring any
* locks, in case a user-defined hash function happens to be slow.
*
* If scankey operator is not a cross-type comparison, we can use the
* cached hash function; otherwise gotta look it up in the catalogs.
*
* We support the convention that sk_subtype == InvalidOid means the
* opclass input type; this is a hack to simplify life for ScanKeyInit().
*/
if (cur->sk_subtype == rel->rd_opcintype[0] ||
cur->sk_subtype == InvalidOid)
hashkey = _hash_datum2hashkey(rel, cur->sk_argument);
else
hashkey = _hash_datum2hashkey_type(rel, cur->sk_argument,
cur->sk_subtype);
so->hashso_sk_hash = hashkey;
/* Read the metapage */
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
metap = HashPageGetMeta(BufferGetPage(metabuf));
/*
* Loop until we get a lock on the correct target bucket.
*/
for (;;)
{
/*
* Compute the target bucket number, and convert to block number.
*/
bucket = _hash_hashkey2bucket(hashkey,
metap->hashm_maxbucket,
metap->hashm_highmask,
metap->hashm_lowmask);
blkno = BUCKET_TO_BLKNO(metap, bucket);
/* Release metapage lock, but keep pin. */
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
//......... part of the code omitted here .........
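/*
 * Illustrative sketch, not part of the original file: how a hash key is
 * mapped to a bucket under the mask scheme used above.  The real work is
 * done by _hash_hashkey2bucket(); the helper name below is invented for the
 * example.  The key is first masked with the "high" mask; if that lands
 * beyond the highest bucket created so far, the target bucket has not been
 * split yet, so the key is re-masked with the "low" mask.
 */
static uint32
hashkey_to_bucket_sketch(uint32 hashkey, uint32 maxbucket,
						 uint32 highmask, uint32 lowmask)
{
	uint32		bucket;

	bucket = hashkey & highmask;
	if (bucket > maxbucket)
		bucket = bucket & lowmask;	/* target bucket not split yet */

	return bucket;
}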
/* ----------
* toast_flatten_tuple_attribute -
*
* If a Datum is of composite type, "flatten" it to contain no toasted fields.
* This must be invoked on any potentially-composite field that is to be
* inserted into a tuple. Doing this preserves the invariant that toasting
* goes only one level deep in a tuple.
* ----------
*/
Datum
toast_flatten_tuple_attribute(Datum value,
Oid typeId, int32 typeMod)
{
TupleDesc tupleDesc;
HeapTupleHeader olddata;
HeapTupleHeader new_data;
int32 new_len;
HeapTupleData tmptup;
Form_pg_attribute *att;
int numAttrs;
int i;
bool need_change = false;
bool has_nulls = false;
Datum toast_values[MaxTupleAttributeNumber];
bool toast_isnull[MaxTupleAttributeNumber];
bool toast_free[MaxTupleAttributeNumber];
/*
* See if it's a composite type, and get the tupdesc if so.
*/
tupleDesc = lookup_rowtype_tupdesc_noerror(typeId, typeMod, true);
if (tupleDesc == NULL)
return value; /* not a composite type */
tupleDesc = CreateTupleDescCopy(tupleDesc);
att = tupleDesc->attrs;
numAttrs = tupleDesc->natts;
/*
* Break down the tuple into fields.
*/
olddata = DatumGetHeapTupleHeader(value);
Assert(typeId == HeapTupleHeaderGetTypeId(olddata));
Assert(typeMod == HeapTupleHeaderGetTypMod(olddata));
/* Build a temporary HeapTuple control structure */
tmptup.t_len = HeapTupleHeaderGetDatumLength(olddata);
ItemPointerSetInvalid(&(tmptup.t_self));
tmptup.t_tableOid = InvalidOid;
tmptup.t_data = olddata;
Assert(numAttrs <= MaxTupleAttributeNumber);
heap_deform_tuple(&tmptup, tupleDesc, toast_values, toast_isnull);
memset(toast_free, 0, numAttrs * sizeof(bool));
for (i = 0; i < numAttrs; i++)
{
/*
* Look at non-null varlena attributes
*/
if (toast_isnull[i])
has_nulls = true;
else if (att[i]->attlen == -1)
{
varattrib *new_value;
new_value = (varattrib *) DatumGetPointer(toast_values[i]);
if (VARATT_IS_EXTENDED(new_value))
{
new_value = heap_tuple_untoast_attr(new_value);
toast_values[i] = PointerGetDatum(new_value);
toast_free[i] = true;
need_change = true;
}
}
}
/*
* If nothing to untoast, just return the original tuple.
*/
if (!need_change)
{
FreeTupleDesc(tupleDesc);
return value;
}
/*
* Calculate the new size of the tuple. Header size should not change,
* but data size might.
*/
new_len = offsetof(HeapTupleHeaderData, t_bits);
if (has_nulls)
new_len += BITMAPLEN(numAttrs);
if (olddata->t_infomask & HEAP_HASOID)
new_len += sizeof(Oid);
new_len = MAXALIGN(new_len);
Assert(new_len == olddata->t_hoff);
new_len += heap_compute_data_size(tupleDesc, toast_values, toast_isnull);
new_data = (HeapTupleHeader) palloc0(new_len);
//......... part of the code omitted here .........
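/*
 * Worked example of the new_len header arithmetic above (illustrative, not
 * part of the original file): for a 3-attribute tuple that contains nulls
 * and has no OID, and on a typical 64-bit build where
 * offsetof(HeapTupleHeaderData, t_bits) is 23 and MAXALIGN rounds up to a
 * multiple of 8, the header size is MAXALIGN(23 + BITMAPLEN(3)) =
 * MAXALIGN(23 + 1) = 24; the Assert then confirms this matches the existing
 * tuple's t_hoff before the data size is added.
 */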
/*
* record_cmp()
* Internal comparison function for records.
*
* Returns -1, 0 or 1
*
* Do not assume that the two inputs are exactly the same record type;
* for instance we might be comparing an anonymous ROW() construct against a
* named composite type. We will compare as long as they have the same number
* of non-dropped columns of the same types.
*/
static int
record_cmp(FunctionCallInfo fcinfo)
{
HeapTupleHeader record1 = PG_GETARG_HEAPTUPLEHEADER(0);
HeapTupleHeader record2 = PG_GETARG_HEAPTUPLEHEADER(1);
int result = 0;
Oid tupType1;
Oid tupType2;
int32 tupTypmod1;
int32 tupTypmod2;
TupleDesc tupdesc1;
TupleDesc tupdesc2;
HeapTupleData tuple1;
HeapTupleData tuple2;
int ncolumns1;
int ncolumns2;
RecordCompareData *my_extra;
int ncols;
Datum *values1;
Datum *values2;
bool *nulls1;
bool *nulls2;
int i1;
int i2;
int j;
/* Extract type info from the tuples */
tupType1 = HeapTupleHeaderGetTypeId(record1);
tupTypmod1 = HeapTupleHeaderGetTypMod(record1);
tupdesc1 = lookup_rowtype_tupdesc(tupType1, tupTypmod1);
ncolumns1 = tupdesc1->natts;
tupType2 = HeapTupleHeaderGetTypeId(record2);
tupTypmod2 = HeapTupleHeaderGetTypMod(record2);
tupdesc2 = lookup_rowtype_tupdesc(tupType2, tupTypmod2);
ncolumns2 = tupdesc2->natts;
/* Build temporary HeapTuple control structures */
tuple1.t_len = HeapTupleHeaderGetDatumLength(record1);
ItemPointerSetInvalid(&(tuple1.t_self));
tuple1.t_tableOid = InvalidOid;
tuple1.t_data = record1;
tuple2.t_len = HeapTupleHeaderGetDatumLength(record2);
ItemPointerSetInvalid(&(tuple2.t_self));
tuple2.t_tableOid = InvalidOid;
tuple2.t_data = record2;
/*
* We arrange to look up the needed comparison info just once per series
* of calls, assuming the record types don't change underneath us.
*/
ncols = Max(ncolumns1, ncolumns2);
my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL ||
my_extra->ncolumns < ncols)
{
fcinfo->flinfo->fn_extra =
MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
sizeof(RecordCompareData) - sizeof(ColumnCompareData)
+ ncols * sizeof(ColumnCompareData));
my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
my_extra->ncolumns = ncols;
my_extra->record1_type = InvalidOid;
my_extra->record1_typmod = 0;
my_extra->record2_type = InvalidOid;
my_extra->record2_typmod = 0;
}
if (my_extra->record1_type != tupType1 ||
my_extra->record1_typmod != tupTypmod1 ||
my_extra->record2_type != tupType2 ||
my_extra->record2_typmod != tupTypmod2)
{
MemSet(my_extra->columns, 0, ncols * sizeof(ColumnCompareData));
my_extra->record1_type = tupType1;
my_extra->record1_typmod = tupTypmod1;
my_extra->record2_type = tupType2;
my_extra->record2_typmod = tupTypmod2;
}
/* Break down the tuples into fields */
values1 = (Datum *) palloc(ncolumns1 * sizeof(Datum));
nulls1 = (bool *) palloc(ncolumns1 * sizeof(bool));
heap_deform_tuple(&tuple1, tupdesc1, values1, nulls1);
values2 = (Datum *) palloc(ncolumns2 * sizeof(Datum));
nulls2 = (bool *) palloc(ncolumns2 * sizeof(bool));
heap_deform_tuple(&tuple2, tupdesc2, values2, nulls2);
/*
* Scan corresponding columns, allowing for dropped columns in different
*/
//......... part of the code omitted here .........
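/*
 * Illustrative sketch, not the verbatim continuation of record_cmp(): the
 * parallel walk over two tuple descriptors that skips dropped columns, as
 * the truncated comment above describes.  The helper name and its reduced
 * argument list are invented for the example; the real code also checks that
 * each pair of column types matches and compares the column values.
 */
static void
walk_columns_sketch(TupleDesc tupdesc1, int ncolumns1,
					TupleDesc tupdesc2, int ncolumns2)
{
	int			i1 = 0;
	int			i2 = 0;

	while (i1 < ncolumns1 && i2 < ncolumns2)
	{
		if (tupdesc1->attrs[i1]->attisdropped)
		{
			i1++;				/* skip dropped column in the first record */
			continue;
		}
		if (tupdesc2->attrs[i2]->attisdropped)
		{
			i2++;				/* skip dropped column in the second record */
			continue;
		}
		/* ... here the real code compares values1[i1] with values2[i2] ... */
		i1++;
		i2++;
	}
}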
Datum
gistrescan(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanKey key = (ScanKey) PG_GETARG_POINTER(1);
GISTScanOpaque so;
int i;
so = (GISTScanOpaque) scan->opaque;
if (so != NULL)
{
/* rescan an existing indexscan --- reset state */
gistfreestack(so->stack);
so->stack = NULL;
/* drop pins on buffers -- no locks held */
if (BufferIsValid(so->curbuf))
{
ReleaseBuffer(so->curbuf);
so->curbuf = InvalidBuffer;
}
}
else
{
/* initialize opaque data */
so = (GISTScanOpaque) palloc(sizeof(GISTScanOpaqueData));
so->stack = NULL;
so->tempCxt = createTempGistContext();
so->curbuf = InvalidBuffer;
so->giststate = (GISTSTATE *) palloc(sizeof(GISTSTATE));
initGISTstate(so->giststate, scan->indexRelation);
scan->opaque = so;
}
/*
* Clear all the pointers.
*/
ItemPointerSetInvalid(&so->curpos);
so->nPageData = so->curPageData = 0;
so->qual_ok = true;
/* Update scan key, if a new one is given */
if (key && scan->numberOfKeys > 0)
{
memmove(scan->keyData, key,
scan->numberOfKeys * sizeof(ScanKeyData));
/*
* Modify the scan keys so that the Consistent method is called for
* all comparisons. The original operator is passed to the Consistent
* function in the form of its strategy number, which is available
* from the sk_strategy field, and its subtype from the sk_subtype
* field.
*
* Next, if any of the keys is NULL and that key is not marked with
* SK_SEARCHNULL/SK_SEARCHNOTNULL, then nothing can be found (i.e., we
* assume all indexable operators are strict).
*/
for (i = 0; i < scan->numberOfKeys; i++)
{
ScanKey skey = &(scan->keyData[i]);
skey->sk_func = so->giststate->consistentFn[skey->sk_attno - 1];
if (skey->sk_flags & SK_ISNULL)
{
if (!(skey->sk_flags & (SK_SEARCHNULL | SK_SEARCHNOTNULL)))
so->qual_ok = false;
}
}
}
PG_RETURN_VOID();
}
/*
* heap_formtuple
*
* construct a tuple from the given values[] and nulls[] arrays
*
* Null attributes are indicated by an 'n' in the appropriate byte
* of nulls[]. Non-null attributes are indicated by a ' ' (space).
*
* OLD API with char 'n'/' ' convention for indicating nulls.
* This is deprecated and should not be used in new code, but we keep it
* around for use by old add-on modules.
*/
HeapTuple
heap_formtuple(TupleDesc tupleDescriptor,
Datum *values,
char *nulls)
{
HeapTuple tuple; /* return tuple */
HeapTupleHeader td; /* tuple data */
Size len,
data_len;
int hoff;
bool hasnull = false;
Form_pg_attribute *att = tupleDescriptor->attrs;
int numberOfAttributes = tupleDescriptor->natts;
int i;
if (numberOfAttributes > MaxTupleAttributeNumber)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_COLUMNS),
errmsg("number of columns (%d) exceeds limit (%d)",
numberOfAttributes, MaxTupleAttributeNumber)));
/*
* Check for nulls and embedded tuples; expand any toasted attributes in
* embedded tuples. This preserves the invariant that toasting can only
* go one level deep.
*
* We can skip calling toast_flatten_tuple_attribute() if the attribute
* couldn't possibly be of composite type. All composite datums are
* varlena and have alignment 'd'; furthermore they aren't arrays. Also,
* if an attribute is already toasted, it must have been sent to disk
* already and so cannot contain toasted attributes.
*/
for (i = 0; i < numberOfAttributes; i++)
{
if (nulls[i] != ' ')
hasnull = true;
else if (att[i]->attlen == -1 &&
att[i]->attalign == 'd' &&
att[i]->attndims == 0 &&
!VARATT_IS_EXTENDED(values[i]))
{
values[i] = toast_flatten_tuple_attribute(values[i],
att[i]->atttypid,
att[i]->atttypmod);
}
}
/*
* Determine total space needed
*/
len = offsetof(HeapTupleHeaderData, t_bits);
if (hasnull)
len += BITMAPLEN(numberOfAttributes);
if (tupleDescriptor->tdhasoid)
len += sizeof(Oid);
hoff = len = MAXALIGN(len); /* align user data safely */
data_len = ComputeDataSize(tupleDescriptor, values, nulls);
len += data_len;
/*
* Allocate and zero the space needed. Note that the tuple body and
* HeapTupleData management structure are allocated in one chunk.
*/
tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
tuple->t_data = td = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
/*
* And fill in the information. Note we fill the Datum fields even though
* this tuple may never become a Datum.
*/
tuple->t_len = len;
ItemPointerSetInvalid(&(tuple->t_self));
HeapTupleHeaderSetDatumLength(td, len);
HeapTupleHeaderSetTypeId(td, tupleDescriptor->tdtypeid);
HeapTupleHeaderSetTypMod(td, tupleDescriptor->tdtypmod);
HeapTupleHeaderSetNatts(td, numberOfAttributes);
td->t_hoff = hoff;
if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
td->t_infomask = HEAP_HASOID;
//......... part of the code omitted here .........
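/*
 * Illustrative usage sketch, not from the original file: a caller of the
 * deprecated heap_formtuple() API described above.  nulls[] holds 'n' for a
 * null attribute and ' ' (space) for a non-null one.  "tupdesc" stands for a
 * two-column TupleDesc obtained elsewhere.
 */
static HeapTuple
build_tuple_sketch(TupleDesc tupdesc)
{
	Datum		values[2];
	char		nulls[2];

	values[0] = Int32GetDatum(42);
	nulls[0] = ' ';				/* first attribute is not null */
	values[1] = (Datum) 0;
	nulls[1] = 'n';				/* second attribute is null */

	return heap_formtuple(tupdesc, values, nulls);
}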
static void
populate_recordset_object_end(void *state)
{
PopulateRecordsetState *_state = (PopulateRecordsetState *) state;
HTAB *json_hash = _state->json_hash;
Datum *values;
bool *nulls;
char fname[NAMEDATALEN];
int i;
RecordIOData *my_extra = _state->my_extra;
int ncolumns = my_extra->ncolumns;
TupleDesc tupdesc = _state->ret_tdesc;
JsonHashEntry *hashentry;
HeapTupleHeader rec = _state->rec;
HeapTuple rettuple;
if (_state->lex->lex_level > 1)
return;
values = (Datum *) palloc(ncolumns * sizeof(Datum));
nulls = (bool *) palloc(ncolumns * sizeof(bool));
if (_state->rec)
{
HeapTupleData tuple;
/* Build a temporary HeapTuple control structure */
tuple.t_len = HeapTupleHeaderGetDatumLength(_state->rec);
ItemPointerSetInvalid(&(tuple.t_self));
tuple.t_tableOid = InvalidOid;
tuple.t_data = _state->rec;
/* Break down the tuple into fields */
heap_deform_tuple(&tuple, tupdesc, values, nulls);
}
else
{
for (i = 0; i < ncolumns; ++i)
{
values[i] = (Datum) 0;
nulls[i] = true;
}
}
for (i = 0; i < ncolumns; ++i)
{
ColumnIOData *column_info = &my_extra->columns[i];
Oid column_type = tupdesc->attrs[i]->atttypid;
char *value;
/* Ignore dropped columns in datatype */
if (tupdesc->attrs[i]->attisdropped)
{
nulls[i] = true;
continue;
}
memset(fname, 0, NAMEDATALEN);
strncpy(fname, NameStr(tupdesc->attrs[i]->attname), NAMEDATALEN);
hashentry = hash_search(json_hash, fname, HASH_FIND, NULL);
/*
* we can't just skip here if the key wasn't found since we might have
* a domain to deal with. If we were passed in a non-null record
* datum, we assume that the existing values are valid (if they're
* not, then it's not our fault), but if we were passed in a null,
* then every field which we don't populate needs to be run through
* the input function just in case it's a domain type.
*/
if (hashentry == NULL && rec)
continue;
/*
* Prepare to convert the column value from text
*/
if (column_info->column_type != column_type)
{
getTypeInputInfo(column_type,
&column_info->typiofunc,
&column_info->typioparam);
fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
_state->fn_mcxt);
column_info->column_type = column_type;
}
if (hashentry == NULL || hashentry->isnull)
{
/*
* need InputFunctionCall to happen even for nulls, so that domain
* checks are done
*/
values[i] = InputFunctionCall(&column_info->proc, NULL,
column_info->typioparam,
tupdesc->attrs[i]->atttypmod);
nulls[i] = true;
}
else
{
value = hashentry->val;
values[i] = InputFunctionCall(&column_info->proc, value,
//......... part of the code omitted here .........
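/*
 * For context (not part of the original file): populate_recordset_object_end
 * is one of the JSON parser callbacks behind json_populate_recordset(), which
 * at the SQL level is typically called as something like
 * json_populate_recordset(null::some_rowtype, '[{"a":1},{"a":2}]'),
 * producing one result row per object in the JSON array.  Each object is
 * hashed by key here and every record column is looked up by name, so keys
 * missing from the object come out NULL, after passing through the column
 * type's input function as the comments above explain.
 */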