C++ ExecStoreTuple Function Code Examples


This article collects typical usage examples of the ExecStoreTuple function in C++. If you have been wondering how ExecStoreTuple is used in practice, what its arguments mean, or what real-world calls look like, the hand-picked examples below should help.



A total of 20 ExecStoreTuple code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
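Before the individual examples, here is a minimal sketch of the two call patterns that recur throughout them: storing a buffer-backed tuple obtained from a heap scan (pass the scan's current buffer and shouldFree = false), and storing a locally palloc'd tuple (pass InvalidBuffer and shouldFree = true). This sketch is not taken from any of the projects below; the function names scan_and_store_example and store_palloc_copy_example, the relation argument, and the use of attribute number 1 are hypothetical placeholders, and it is written against the pre-PostgreSQL-12 heap-scan and slot API that these snippets use.

#include "postgres.h"

#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/relscan.h"
#include "executor/tuptable.h"
#include "storage/buf.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"

/*
 * Hypothetical example: scan a relation and store each on-disk tuple in a
 * standalone slot.  Because the tuple points into a shared buffer, we pass
 * the scan's current buffer and shouldFree = false; ExecStoreTuple pins the
 * buffer until the slot is cleared or reused.
 */
static void
scan_and_store_example(Relation rel)
{
	TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(rel));
	HeapScanDesc scan = heap_beginscan(rel, GetTransactionSnapshot(), 0, NULL);
	HeapTuple	tup;

	while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		Datum		d;
		bool		isnull;

		ExecStoreTuple(tup, slot, scan->rs_cbuf, false);
		d = slot_getattr(slot, 1, &isnull);	/* attribute 1 is a placeholder */
		(void) d;
	}

	ExecDropSingleTupleTableSlot(slot);
	heap_endscan(scan);
}

/*
 * Hypothetical example: store a locally palloc'd copy.  There is no buffer
 * to pin, so pass InvalidBuffer, and let the slot free the copy when it is
 * cleared by passing shouldFree = true.
 */
static void
store_palloc_copy_example(TupleTableSlot *slot, HeapTuple tuple)
{
	HeapTuple	copy = heap_copytuple(tuple);

	ExecStoreTuple(copy, slot, InvalidBuffer, true);
}

Note that PostgreSQL 12 and later replace ExecStoreTuple with ExecStoreHeapTuple (for heap tuples not backed by a buffer) and ExecStoreBufferHeapTuple (for buffer-backed tuples), so the examples below apply to older releases and to forks built on them.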

Example 1: get_all_brokers

/*
 * get_all_brokers
 *
 * Return a list of all brokers in pipeline_kafka_brokers
 */
static List *
get_all_brokers(void)
{
	HeapTuple tup = NULL;
	HeapScanDesc scan;
	Relation brokers = open_pipeline_kafka_brokers();
	TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(brokers));
	List *result = NIL;

	scan = heap_beginscan(brokers, GetTransactionSnapshot(), 0, NULL);
	while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		char *host;
		Datum d;
		bool isnull;

		ExecStoreTuple(tup, slot, InvalidBuffer, false);
		d = slot_getattr(slot, BROKER_ATTR_HOST, &isnull);
		host = TextDatumGetCString(d);

		result = lappend(result, host);
	}

	ExecDropSingleTupleTableSlot(slot);
	heap_endscan(scan);
	heap_close(brokers, NoLock);

	return result;
}
Developer: huiyuanlu, Project: pipelinedb, Lines: 34, Source: pipeline_kafka.c


Example 2: SpoolerInsert

void
SpoolerInsert(Spooler *self, HeapTuple tuple)
{
	/* Spool keys in the tuple */
	ExecStoreTuple(tuple, self->slot, InvalidBuffer, false);
	IndexSpoolInsert(self->spools, self->slot, &(tuple->t_self), self->estate, true);
	BULKLOAD_PROFILE(&prof_writer_index);
}
Developer: chuongnn, Project: pg_bulkload, Lines: 8, Source: pg_btree.c


Example 3: FunctionNext

/* ----------------------------------------------------------------
 *		FunctionNext
 *
 *		This is a workhorse for ExecFunctionScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
FunctionNext(FunctionScanState *node)
{
	TupleTableSlot *slot;
	EState	   *estate;
	ScanDirection direction;
	Tuplestorestate *tuplestorestate;
	bool		should_free;
	HeapTuple	heapTuple;

	/*
	 * get information from the estate and scan state
	 */
	estate = node->ss.ps.state;
	direction = estate->es_direction;

	tuplestorestate = node->tuplestorestate;

	/*
	 * If first time through, read all tuples from function and put them
	 * in a tuplestore. Subsequent calls just fetch tuples from
	 * tuplestore.
	 */
	if (tuplestorestate == NULL)
	{
		ExprContext *econtext = node->ss.ps.ps_ExprContext;
		TupleDesc	funcTupdesc;

		node->tuplestorestate = tuplestorestate =
			ExecMakeTableFunctionResult(node->funcexpr,
										econtext,
										node->tupdesc,
										&funcTupdesc);

		/*
		 * If function provided a tupdesc, cross-check it.	We only really
		 * need to do this for functions returning RECORD, but might as
		 * well do it always.
		 */
		if (funcTupdesc && !tupledesc_match(node->tupdesc, funcTupdesc))
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("query-specified return row and actual function return row do not match")));
	}

	/*
	 * Get the next tuple from tuplestore. Return NULL if no more tuples.
	 */
	heapTuple = tuplestore_getheaptuple(tuplestorestate,
										ScanDirectionIsForward(direction),
										&should_free);
	slot = node->ss.ss_ScanTupleSlot;
	return ExecStoreTuple(heapTuple, slot, InvalidBuffer, should_free);
}
Developer: berkeley-cs186, Project: course-fa07, Lines: 60, Source: nodeFunctionscan.c


Example 4: FunctionNext

/* ----------------------------------------------------------------
 *		FunctionNext
 *
 *		This is a workhorse for ExecFunctionScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
FunctionNext(FunctionScanState *node)
{
	TupleTableSlot *slot;
	EState	   *estate;
	ScanDirection direction;
	Tuplestorestate *tuplestorestate;
	bool		should_free;
	HeapTuple	heapTuple;

	/*
	 * get information from the estate and scan state
	 */
	estate = node->ss.ps.state;
	direction = estate->es_direction;

	tuplestorestate = node->tuplestorestate;

	/*
	 * If first time through, read all tuples from function and put them in a
	 * tuplestore. Subsequent calls just fetch tuples from tuplestore.
	 */
	if (tuplestorestate == NULL)
	{
		ExprContext *econtext = node->ss.ps.ps_ExprContext;
		TupleDesc	funcTupdesc;

		node->tuplestorestate = tuplestorestate =
			ExecMakeTableFunctionResult(node->funcexpr,
										econtext,
										node->tupdesc,
										&funcTupdesc);

		/*
		 * If function provided a tupdesc, cross-check it.	We only really
		 * need to do this for functions returning RECORD, but might as well
		 * do it always.
		 */
		if (funcTupdesc)
			tupledesc_match(node->tupdesc, funcTupdesc);
	}

	/*
	 * Get the next tuple from tuplestore. Return NULL if no more tuples.
	 */
	heapTuple = tuplestore_getheaptuple(tuplestorestate,
										ScanDirectionIsForward(direction),
										&should_free);
	slot = node->ss.ss_ScanTupleSlot;
	if (heapTuple)
		return ExecStoreTuple(heapTuple, slot, InvalidBuffer, should_free);
	else
		return ExecClearTuple(slot);
}
Developer: jaiminpan, Project: bizgres, Lines: 60, Source: nodeFunctionscan.c


Example 5: SeqNext

/* ----------------------------------------------------------------
 *		SeqNext
 *
 *		This is a workhorse for ExecSeqScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
SeqNext(SeqScanState *node)
{
	HeapTuple	tuple;
	HeapScanDesc scandesc;
	EState	   *estate;
	ScanDirection direction;
	TupleTableSlot *slot;

	/*
	 * get information from the estate and scan state
	 */
	scandesc = node->ss.ss_currentScanDesc;
	estate = node->ss.ps.state;
	direction = estate->es_direction;
	slot = node->ss.ss_ScanTupleSlot;

	if (scandesc == NULL)
	{
		/*
		 * We reach here if the scan is not parallel, or if we're executing a
		 * scan that was intended to be parallel serially.
		 */
		scandesc = heap_beginscan(node->ss.ss_currentRelation,
								  estate->es_snapshot,
								  0, NULL);
		node->ss.ss_currentScanDesc = scandesc;
	}

	/*
	 * get the next tuple from the table
	 */
	tuple = heap_getnext(scandesc, direction);

	/*
	 * save the tuple and the buffer returned to us by the access methods in
	 * our scan tuple slot and return the slot.  Note: we pass 'false' because
	 * tuples returned by heap_getnext() are pointers onto disk pages and were
	 * not created with palloc() and so should not be pfree()'d.  Note also
	 * that ExecStoreTuple will increment the refcount of the buffer; the
	 * refcount will not be dropped until the tuple table slot is cleared.
	 */
	if (tuple)
		ExecStoreTuple(tuple,	/* tuple to store */
					   slot,	/* slot to store in */
					   scandesc->rs_cbuf,	/* buffer associated with this
											 * tuple */
					   false);	/* don't pfree this pointer */
	else
		ExecClearTuple(slot);

	return slot;
}
Developer: BertrandAreal, Project: postgres, Lines: 59, Source: nodeSeqscan.c


Example 6: load_consumer_offsets

/*
 * load_consumer_offsets
 *
 * Load all offsets for all of this consumer's partitions
 */
static void
load_consumer_offsets(KafkaConsumer *consumer, struct rd_kafka_metadata_topic *meta, int64_t offset)
{
	MemoryContext old;
	ScanKeyData skey[1];
	HeapTuple tup = NULL;
	HeapScanDesc scan;
	Relation offsets = open_pipeline_kafka_offsets();
	TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(offsets));
	int i;

	ScanKeyInit(&skey[0], OFFSETS_ATTR_CONSUMER, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(consumer->id));
	scan = heap_beginscan(offsets, GetTransactionSnapshot(), 1, skey);

	old = MemoryContextSwitchTo(CacheMemoryContext);
	consumer->offsets = palloc0(meta->partition_cnt * sizeof(int64_t));
	MemoryContextSwitchTo(old);

	/* by default, begin consuming from the end of a stream */
	for (i = 0; i < meta->partition_cnt; i++)
		consumer->offsets[i] = offset;

	consumer->num_partitions = meta->partition_cnt;

	while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		Datum d;
		bool isnull;
		int partition;
		ExecStoreTuple(tup, slot, InvalidBuffer, false);

		d = slot_getattr(slot, OFFSETS_ATTR_PARTITION, &isnull);
		partition = DatumGetInt32(d);

		if(partition > consumer->num_partitions)
			elog(ERROR, "invalid partition id: %d", partition);

		if (offset == RD_KAFKA_OFFSET_NULL)
		{
			d = slot_getattr(slot, OFFSETS_ATTR_OFFSET, &isnull);
			if (isnull)
				offset = RD_KAFKA_OFFSET_END;
			else
				offset = DatumGetInt64(d);
		}

		consumer->offsets[partition] = DatumGetInt64(offset);
	}

	ExecDropSingleTupleTableSlot(slot);
	heap_endscan(scan);
	heap_close(offsets, RowExclusiveLock);
}
Developer: huiyuanlu, Project: pipelinedb, Lines: 58, Source: pipeline_kafka.c


Example 7: ExecRecFetch

/*
 * ExecRecFetch -- fetch next potential tuple
 *
 * This routine is concerned with substituting a test tuple if we are
 * inside an EvalPlanQual recheck.	If we aren't, just execute
 * the access method's next-tuple routine.
 *
 * This method is identical to ExecScanFetch, we just want to avoid
 * possible duplicate function issues.
 */
static inline TupleTableSlot *
ExecRecFetch(ScanState *node,
			  ExecScanAccessMtd accessMtd,
			  ExecScanRecheckMtd recheckMtd)
{
	EState	   *estate = node->ps.state;

	if (estate->es_epqTuple != NULL)
	{
		/*
		 * We are inside an EvalPlanQual recheck.  Return the test tuple if
		 * one is available, after rechecking any access-method-specific
		 * conditions.
		 */
		Index		scanrelid = ((Scan *) node->ps.plan)->scanrelid;

		Assert(scanrelid > 0);
		if (estate->es_epqTupleSet[scanrelid - 1])
		{
			TupleTableSlot *slot = node->ss_ScanTupleSlot;

			/* Return empty slot if we already returned a tuple */
			if (estate->es_epqScanDone[scanrelid - 1])
				return ExecClearTuple(slot);
			/* Else mark to remember that we shouldn't return more */
			estate->es_epqScanDone[scanrelid - 1] = true;

			/* Return empty slot if we haven't got a test tuple */
			if (estate->es_epqTuple[scanrelid - 1] == NULL)
				return ExecClearTuple(slot);

			/* Store test tuple in the plan node's scan slot */
			ExecStoreTuple(estate->es_epqTuple[scanrelid - 1],
						   slot, InvalidBuffer, false);

			/* Check if it meets the access-method conditions */
			if (!(*recheckMtd) (node, slot))
				ExecClearTuple(slot);	/* would not be returned by scan */

			return slot;
		}
	}

	/*
	 * Run the node-type-specific access method function to get the next tuple
	 */
	return (*accessMtd) (node);
}
Developer: abpin, Project: recdb-postgresql, Lines: 58, Source: execRecommend.c


Example 8: gather_getnext

/*
 * Read the next tuple.  We might fetch a tuple from one of the tuple queues
 * using gather_readnext, or if no tuple queue contains a tuple and the
 * single_copy flag is not set, we might generate one locally instead.
 */
static TupleTableSlot *
gather_getnext(GatherState *gatherstate)
{
	PlanState  *outerPlan = outerPlanState(gatherstate);
	TupleTableSlot *outerTupleSlot;
	TupleTableSlot *fslot = gatherstate->funnel_slot;
	HeapTuple	tup;

	while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
	{
		CHECK_FOR_INTERRUPTS();

		if (gatherstate->nreaders > 0)
		{
			tup = gather_readnext(gatherstate);

			if (HeapTupleIsValid(tup))
			{
				ExecStoreTuple(tup, /* tuple to store */
							   fslot,	/* slot in which to store the tuple */
							   InvalidBuffer,	/* buffer associated with this
												 * tuple */
							   true);	/* pfree tuple when done with it */
				return fslot;
			}
		}

		if (gatherstate->need_to_scan_locally)
		{
			EState *estate = gatherstate->ps.state;

			/* Install our DSA area while executing the plan. */
			estate->es_query_dsa =
				gatherstate->pei ? gatherstate->pei->area : NULL;
			outerTupleSlot = ExecProcNode(outerPlan);
			estate->es_query_dsa = NULL;

			if (!TupIsNull(outerTupleSlot))
				return outerTupleSlot;

			gatherstate->need_to_scan_locally = false;
		}
	}

	return ExecClearTuple(fslot);
}
Developer: maksm90, Project: postgresql, Lines: 51, Source: nodeGather.c


Example 9: ExecCopySlot

/* --------------------------------
 *		ExecCopySlot
 *			Copy the source slot's contents into the destination slot.
 *
 *		The destination acquires a private copy that will not go away
 *		if the source is cleared.
 *
 *		The caller must ensure the slots have compatible tupdescs.
 * --------------------------------
 */
TupleTableSlot *
ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
{
	HeapTuple	newTuple;
	MemoryContext oldContext;

	/*
	 * There might be ways to optimize this when the source is virtual, but
	 * for now just always build a physical copy.  Make sure it is in the
	 * right context.
	 */
	oldContext = MemoryContextSwitchTo(dstslot->tts_mcxt);
	newTuple = ExecCopySlotTuple(srcslot);
	MemoryContextSwitchTo(oldContext);

	return ExecStoreTuple(newTuple, dstslot, InvalidBuffer, true);
}
Developer: CraigBryan, Project: PostgresqlFun, Lines: 27, Source: execTuples.c


Example 10: CopyIntoStream

/*
 * CopyIntoStream
 *
 * COPY events to a stream from an input source
 */
void
CopyIntoStream(Relation rel, TupleDesc desc, HeapTuple *tuples, int ntuples)
{
	bool snap = ActiveSnapshotSet();
	ResultRelInfo rinfo;
	StreamInsertState *sis;

	MemSet(&rinfo, 0, sizeof(ResultRelInfo));
	rinfo.ri_RangeTableIndex = 1; /* dummy */
	rinfo.ri_TrigDesc = NULL;
	rinfo.ri_RelationDesc = rel;

	if (snap)
		PopActiveSnapshot();

	BeginStreamModify(NULL, &rinfo, list_make1(desc), 0, 0);
	sis = (StreamInsertState *) rinfo.ri_FdwState;
	Assert(sis);

	if (sis->queries)
	{
		TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(rel));
		int i;

		for (i = 0; i < ntuples; i++)
		{
			ExecStoreTuple(tuples[i], slot, InvalidBuffer, false);
			ExecStreamInsert(NULL, &rinfo, slot, NULL);
			ExecClearTuple(slot);
		}

		ExecDropSingleTupleTableSlot(slot);

		Assert(sis->ntups == ntuples);
		pgstat_increment_cq_write(ntuples, sis->nbytes);
	}

	EndStreamModify(NULL, &rinfo);

	if (snap)
		PushActiveSnapshot(GetTransactionSnapshot());
}
Developer: usmanm, Project: pipelinedb, Lines: 47, Source: stream.c


Example 11: gather_getnext

/*
 * Read the next tuple.  We might fetch a tuple from one of the tuple queues
 * using gather_readnext, or if no tuple queue contains a tuple and the
 * single_copy flag is not set, we might generate one locally instead.
 */
static TupleTableSlot *
gather_getnext(GatherState *gatherstate)
{
	PlanState  *outerPlan = outerPlanState(gatherstate);
	TupleTableSlot *outerTupleSlot;
	TupleTableSlot *fslot = gatherstate->funnel_slot;
	HeapTuple	tup;

	while (gatherstate->reader != NULL || gatherstate->need_to_scan_locally)
	{
		if (gatherstate->reader != NULL)
		{
			tup = gather_readnext(gatherstate);

			if (HeapTupleIsValid(tup))
			{
				ExecStoreTuple(tup,		/* tuple to store */
							   fslot,	/* slot in which to store the tuple */
							   InvalidBuffer,	/* buffer associated with this
												 * tuple */
							   true);	/* pfree this pointer if not from heap */

				return fslot;
			}
		}

		if (gatherstate->need_to_scan_locally)
		{
			outerTupleSlot = ExecProcNode(outerPlan);

			if (!TupIsNull(outerTupleSlot))
				return outerTupleSlot;

			gatherstate->need_to_scan_locally = false;
		}
	}

	return ExecClearTuple(fslot);
}
Developer: linwanggm, Project: postgres, Lines: 44, Source: nodeGather.c


Example 12: CheckerConstraints

HeapTuple
CheckerConstraints(Checker *checker, HeapTuple tuple, int *parsing_field)
{
	if (checker->has_constraints)
	{
		*parsing_field = 0;

		/* Place tuple in tuple slot */
		ExecStoreTuple(tuple, checker->slot, InvalidBuffer, false);

		/* Check the constraints of the tuple */
		ExecConstraints(checker->resultRelInfo, checker->slot, checker->estate);
	}
	else if (checker->has_not_null && HeapTupleHasNulls(tuple))
	{
		/*
		 * Even if CHECK_CONSTRAINTS is not specified, check NOT NULL constraint
		 */
		TupleDesc	desc = checker->desc;
		int			i;

		for (i = 0; i < desc->natts; i++)
		{
			if (desc->attrs[i]->attnotnull &&
				att_isnull(i, tuple->t_data->t_bits))
			{
				*parsing_field = i + 1;	/* 1 origin */
				ereport(ERROR,
						(errcode(ERRCODE_NOT_NULL_VIOLATION),
						 errmsg("null value in column \"%s\" violates not-null constraint",
						NameStr(desc->attrs[i]->attname))));
			}
		}
	}

	return tuple;
}
Developer: gatehouse, Project: pg_bulkload, Lines: 37, Source: reader.c


Example 13: ExecMaterializeSlot

/* --------------------------------
 *		ExecMaterializeSlot
 *			Force a slot into the "materialized" state.
 *
 *		This causes the slot's tuple to be a local copy not dependent on
 *		any external storage.  A pointer to the contained tuple is returned.
 *
 *		A typical use for this operation is to prepare a computed tuple
 *		for being stored on disk.  The original data may or may not be
 *		virtual, but in any case we need a private copy for heap_insert
 *		to scribble on.
 * --------------------------------
 */
HeapTuple
ExecMaterializeSlot(TupleTableSlot *slot)
{
	HeapTuple	newTuple;
	MemoryContext oldContext;

	/*
	 * sanity checks
	 */
	Assert(slot != NULL);
	Assert(!slot->tts_isempty);

	/*
	 * If we have a physical tuple, and it's locally palloc'd, we have nothing
	 * to do.
	 */
	if (slot->tts_tuple && slot->tts_shouldFree)
		return slot->tts_tuple;

	/*
	 * Otherwise, copy or build a tuple, and then store it as the new slot
	 * value.  (Note: tts_nvalid will be reset to zero here.  There are cases
	 * in which this could be optimized but it's probably not worth worrying
	 * about.)
	 *
	 * We may be called in a context that is shorter-lived than the tuple
	 * slot, but we have to ensure that the materialized tuple will survive
	 * anyway.
	 */
	oldContext = MemoryContextSwitchTo(slot->tts_mcxt);
	newTuple = ExecCopySlotTuple(slot);
	MemoryContextSwitchTo(oldContext);

	ExecStoreTuple(newTuple, slot, InvalidBuffer, true);

	return slot->tts_tuple;
}
Developer: CraigBryan, Project: PostgresqlFun, Lines: 50, Source: execTuples.c


Example 14: save_consumer_state

/*
 * save_consumer_state
 *
 * Saves the given consumer's state to pipeline_kafka_consumers
 */
static void
save_consumer_state(KafkaConsumer *consumer, int partition_group)
{
	ScanKeyData skey[1];
	HeapTuple tup = NULL;
	HeapScanDesc scan;
	Relation offsets = open_pipeline_kafka_offsets();
	Datum values[OFFSETS_RELATION_NATTS];
	bool nulls[OFFSETS_RELATION_NATTS];
	bool replace[OFFSETS_RELATION_NATTS];
	bool updated[consumer->num_partitions];
	TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(offsets));
	int partition;

	MemSet(updated, false, sizeof(updated));

	ScanKeyInit(&skey[0], OFFSETS_ATTR_CONSUMER, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(consumer->id));
	scan = heap_beginscan(offsets, GetTransactionSnapshot(), 1, skey);

	/* update any existing offset rows */
	while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		Datum d;
		bool isnull;
		int partition;
		HeapTuple modified;

		ExecStoreTuple(tup, slot, InvalidBuffer, false);
		d = slot_getattr(slot, OFFSETS_ATTR_PARTITION, &isnull);
		partition = DatumGetInt32(d);

		/* we only want to update the offsets we're responsible for */
		if (partition % consumer->parallelism != partition_group)
			continue;

		MemSet(nulls, false, sizeof(nulls));
		MemSet(replace, false, sizeof(nulls));

		values[OFFSETS_ATTR_OFFSET - 1] = Int64GetDatum(consumer->offsets[partition]);
		replace[OFFSETS_ATTR_OFFSET - 1] = true;
		updated[partition] = true;

		modified = heap_modify_tuple(tup, RelationGetDescr(offsets), values, nulls, replace);
		simple_heap_update(offsets, &modified->t_self, modified);
	}

	heap_endscan(scan);

	/* now insert any offset rows that didn't already exist */
	for (partition = 0; partition < consumer->num_partitions; partition++)
	{
		if (updated[partition])
			continue;

		if (partition % consumer->parallelism != partition_group)
			continue;

		values[OFFSETS_ATTR_CONSUMER - 1] = ObjectIdGetDatum(consumer->id);
		values[OFFSETS_ATTR_PARTITION - 1] = Int32GetDatum(partition);
		values[OFFSETS_ATTR_OFFSET - 1] = Int64GetDatum(consumer->offsets[partition]);

		MemSet(nulls, false, sizeof(nulls));

		tup = heap_form_tuple(RelationGetDescr(offsets), values, nulls);
		simple_heap_insert(offsets, tup);
	}

	ExecDropSingleTupleTableSlot(slot);
	heap_close(offsets, NoLock);
}
Developer: huiyuanlu, Project: pipelinedb, Lines: 75, Source: pipeline_kafka.c


Example 15: SeqNext

/* ----------------------------------------------------------------
 *		SeqNext
 *
 *		This is a workhorse for ExecSeqScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
SeqNext(SeqScanState *node)
{
	HeapTuple	tuple;
	HeapScanDesc scandesc;
	Index		scanrelid;
	EState	   *estate;
	ScanDirection direction;
	TupleTableSlot *slot;

	/*
	 * get information from the estate and scan state
	 */
	estate = node->ps.state;
	scandesc = node->ss_currentScanDesc;
	scanrelid = ((SeqScan *) node->ps.plan)->scanrelid;
	direction = estate->es_direction;
	slot = node->ss_ScanTupleSlot;

	/*
	 * Check if we are evaluating PlanQual for tuple of this relation.
	 * Additional checking is not good, but no other way for now. We could
	 * introduce new nodes for this case and handle SeqScan --> NewNode
	 * switching in Init/ReScan plan...
	 */
	if (estate->es_evTuple != NULL &&
		estate->es_evTuple[scanrelid - 1] != NULL)
	{
		if (estate->es_evTupleNull[scanrelid - 1])
			return ExecClearTuple(slot);

		ExecStoreTuple(estate->es_evTuple[scanrelid - 1],
					   slot, InvalidBuffer, false);

		/*
		 * Note that unlike IndexScan, SeqScan never use keys in
		 * heap_beginscan (and this is very bad) - so, here we do not check
		 * are keys ok or not.
		 */

		/* Flag for the next call that no more tuples */
		estate->es_evTupleNull[scanrelid - 1] = true;
		return slot;
	}

	/*
	 * get the next tuple from the access methods
	 */
	scandesc->estate=estate;
	scandesc->slot=slot;
	scandesc->qual=node->ps.qual;
	tuple = heap_getnext(scandesc, direction);

	/*
	 * save the tuple and the buffer returned to us by the access methods in
	 * our scan tuple slot and return the slot.  Note: we pass 'false' because
	 * tuples returned by heap_getnext() are pointers onto disk pages and were
	 * not created with palloc() and so should not be pfree()'d.  Note also
	 * that ExecStoreTuple will increment the refcount of the buffer; the
	 * refcount will not be dropped until the tuple table slot is cleared.
	 */
	if (tuple)
		ExecStoreTuple(tuple,	/* tuple to store */
					   slot,	/* slot to store in */
					   scandesc->rs_cbuf,		/* buffer associated with this
												 * tuple */
					   false);	/* don't pfree this pointer */
	else
		ExecClearTuple(slot);

	return slot;
}
Developer: Khalefa, Project: VLDB12Demo, Lines: 78, Source: nodeSeqscan.c


Example 16: ExecSort


//......... part of the code is omitted here .........
	HeapTuple	heapTuple;
	TupleTableSlot *slot;
	bool		should_free;

	/*
	 * get state info from node
	 */
	SO1_printf("ExecSort: %s\n",
			   "entering routine");

	estate = node->ss.ps.state;
	dir = estate->es_direction;
	tuplesortstate = (Tuplesortstate *) node->tuplesortstate;

	/*
	 * If first time through, read all tuples from outer plan and pass them to
	 * tuplesort.c. Subsequent calls just fetch tuples from tuplesort.
	 */

	if (!node->sort_Done)
	{
		Sort	   *plannode = (Sort *) node->ss.ps.plan;
		PlanState  *outerNode;
		TupleDesc	tupDesc;

		SO1_printf("ExecSort: %s\n",
				   "sorting subplan");

		/*
		 * Want to scan subplan in the forward direction while creating the
		 * sorted data.
		 */
		estate->es_direction = ForwardScanDirection;

		/*
		 * Initialize tuplesort module.
		 */
		SO1_printf("ExecSort: %s\n",
				   "calling tuplesort_begin");

		outerNode = outerPlanState(node);
		tupDesc = ExecGetResultType(outerNode);

		tuplesortstate = tuplesort_begin_heap(tupDesc,
											  plannode->numCols,
											  plannode->sortOperators,
											  plannode->sortColIdx,
											  work_mem,
											  node->randomAccess);
		node->tuplesortstate = (void *) tuplesortstate;

		/*
		 * Scan the subplan and feed all the tuples to tuplesort.
		 */

		for (;;)
		{
			slot = ExecProcNode(outerNode);

			if (TupIsNull(slot))
				break;

			tuplesort_puttuple(tuplesortstate,
							   (void *) ExecFetchSlotTuple(slot));
		}

		/*
		 * Complete the sort.
		 */
		tuplesort_performsort(tuplesortstate);

		/*
		 * restore to user specified direction
		 */
		estate->es_direction = dir;

		/*
		 * finally set the sorted flag to true
		 */
		node->sort_Done = true;
		SO1_printf("ExecSort: %s\n", "sorting done");
	}

	SO1_printf("ExecSort: %s\n",
			   "retrieving tuple from tuplesort");

	/*
	 * Get the first or next tuple from tuplesort. Returns NULL if no more
	 * tuples.
	 */
	heapTuple = tuplesort_getheaptuple(tuplesortstate,
									   ScanDirectionIsForward(dir),
									   &should_free);

	slot = node->ss.ps.ps_ResultTupleSlot;
	if (heapTuple)
		return ExecStoreTuple(heapTuple, slot, InvalidBuffer, should_free);
	else
		return ExecClearTuple(slot);
}
Developer: jaiminpan, Project: bizgres, Lines: 101, Source: nodeSort.c


Example 17: pipeline_stream_insert

Datum
pipeline_stream_insert(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;
	Trigger *trig = trigdata->tg_trigger;
	HeapTuple tup;
	List *fdw_private;
	int i;
	ResultRelInfo rinfo;

	if (trig->tgnargs < 1)
		elog(ERROR, "pipeline_stream_insert: must be provided a stream name");

	/* make sure it's called as a trigger */
	if (!CALLED_AS_TRIGGER(fcinfo))
		ereport(ERROR,
				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
				 errmsg("pipeline_stream_insert: must be called as trigger")));

	/* and that it's called on update or insert */
	if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) && !TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
		ereport(ERROR,
				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
				 errmsg("pipeline_stream_insert: must be called on insert or update")));

	/* and that it's called for each row */
	if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
		ereport(ERROR,
				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
				 errmsg("pipeline_stream_insert: must be called for each row")));

	/* and that it's called after insert or update */
	if (!TRIGGER_FIRED_AFTER(trigdata->tg_event))
		ereport(ERROR,
				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
				 errmsg("pipeline_stream_insert: must be called after insert or update")));

	if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		tup = trigdata->tg_newtuple;
	else
		tup = trigdata->tg_trigtuple;

	fdw_private = list_make1(RelationGetDescr(trigdata->tg_relation));

	MemSet(&rinfo, 0, sizeof(ResultRelInfo));
	rinfo.ri_RangeTableIndex = 1; /* dummy */
	rinfo.ri_TrigDesc = NULL;

	for (i = 0; i < trig->tgnargs; i++)
	{
		RangeVar *stream;
		Relation rel;
		StreamInsertState *sis;

		stream = makeRangeVarFromNameList(textToQualifiedNameList(cstring_to_text(trig->tgargs[i])));
		rel = heap_openrv(stream, AccessShareLock);

		rinfo.ri_RelationDesc = rel;

		BeginStreamModify(NULL, &rinfo, fdw_private, 0, 0);
		sis = (StreamInsertState *) rinfo.ri_FdwState;
		Assert(sis);

		if (sis->queries)
		{
			TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(rel));

			ExecStoreTuple(tup, slot, InvalidBuffer, false);
			ExecStreamInsert(NULL, &rinfo, slot, NULL);
			ExecClearTuple(slot);

			ExecDropSingleTupleTableSlot(slot);
			pgstat_report_streamstat(true);
		}

		EndStreamModify(NULL, &rinfo);
		heap_close(rel, AccessShareLock);
	}

	return PointerGetDatum(tup);
}
Developer: usmanm, Project: pipelinedb, Lines: 81, Source: stream.c


Example 18: RelationFindReplTupleByIndex

/*
 * Search the relation 'rel' for tuple using the index.
 *
 * If a matching tuple is found, lock it with lockmode, fill the slot with its
 * contents, and return true.  Return false otherwise.
 */
bool
RelationFindReplTupleByIndex(Relation rel, Oid idxoid,
							 LockTupleMode lockmode,
							 TupleTableSlot *searchslot,
							 TupleTableSlot *outslot)
{
	HeapTuple	scantuple;
	ScanKeyData skey[INDEX_MAX_KEYS];
	IndexScanDesc scan;
	SnapshotData snap;
	TransactionId xwait;
	Relation	idxrel;
	bool		found;

	/* Open the index. */
	idxrel = index_open(idxoid, RowExclusiveLock);

	/* Start an index scan. */
	InitDirtySnapshot(snap);
	scan = index_beginscan(rel, idxrel, &snap,
						   RelationGetNumberOfAttributes(idxrel),
						   0);

	/* Build scan key. */
	build_replindex_scan_key(skey, rel, idxrel, searchslot);

retry:
	found = false;

	index_rescan(scan, skey, RelationGetNumberOfAttributes(idxrel), NULL, 0);

	/* Try to find the tuple */
	if ((scantuple = index_getnext(scan, ForwardScanDirection)) != NULL)
	{
		found = true;
		ExecStoreTuple(scantuple, outslot, InvalidBuffer, false);
		ExecMaterializeSlot(outslot);

		xwait = TransactionIdIsValid(snap.xmin) ?
			snap.xmin : snap.xmax;

		/*
		 * If the tuple is locked, wait for locking transaction to finish and
		 * retry.
		 */
		if (TransactionIdIsValid(xwait))
		{
			XactLockTableWait(xwait, NULL, NULL, XLTW_None);
			goto retry;
		}
	}

	/* Found tuple, try to lock it in the lockmode. */
	if (found)
	{
		Buffer		buf;
		HeapUpdateFailureData hufd;
		HTSU_Result res;
		HeapTupleData locktup;

		ItemPointerCopy(&outslot->tts_tuple->t_self, &locktup.t_self);

		PushActiveSnapshot(GetLatestSnapshot());

		res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
							  lockmode,
							  LockWaitBlock,
							  false /* don't follow updates */ ,
							  &buf, &hufd);
		/* the tuple slot already has the buffer pinned */
		ReleaseBuffer(buf);

		PopActiveSnapshot();

		switch (res)
		{
			case HeapTupleMayBeUpdated:
				break;
			case HeapTupleUpdated:
				/* XXX: Improve handling here */
				ereport(LOG,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("concurrent update, retrying")));
				goto retry;
			case HeapTupleInvisible:
				elog(ERROR, "attempted to lock invisible tuple");
			default:
				elog(ERROR, "unexpected heap_lock_tuple status: %u", res);
				break;
		}
	}

	index_endscan(scan);

//......... part of the code is omitted here .........
Developer: timmui, Project: postgres, Lines: 101, Source: execReplication.c


Example 19: apply_handle_update

/*
 * Handle UPDATE message.
 *
 * TODO: FDW support
 */
static void
apply_handle_update(StringInfo s)
{
	LogicalRepRelMapEntry *rel;
	LogicalRepRelId relid;
	Oid			idxoid;
	EState	   *estate;
	EPQState	epqstate;
	LogicalRepTupleData oldtup;
	LogicalRepTupleData newtup;
	bool		has_oldtup;
	TupleTableSlot *localslot;
	TupleTableSlot *remoteslot;
	bool		found;
	MemoryContext oldctx;

	ensure_transaction();

	relid = logicalrep_read_update(s, &has_oldtup, &oldtup,
								   &newtup);
	rel = logicalrep_rel_open(relid, RowExclusiveLock);
	if (!should_apply_changes_for_rel(rel))
	{
		/*
		 * The relation can't become interesting in the middle of the
		 * transaction so it's safe to unlock it.
		 */
		logicalrep_rel_close(rel, RowExclusiveLock);
		return;
	}

	/* Check if we can do the update. */
	check_relation_updatable(rel);

	/* Initialize the executor state. */
	estate = create_estate_for_relation(rel);
	remoteslot = ExecInitExtraTupleSlot(estate,
										RelationGetDescr(rel->localrel));
	localslot = ExecInitExtraTupleSlot(estate,
									   RelationGetDescr(rel->localrel));
	EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1);

	PushActiveSnapshot(GetTransactionSnapshot());
	ExecOpenIndices(estate->es_result_relation_info, false);

	/* Build the search tuple. */
	oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
	slot_store_cstrings(remoteslot, rel,
						has_oldtup ? oldtup.values : newtup.values);
	MemoryContextSwitchTo(oldctx);

	/*
	 * Try to find tuple using either replica identity index, primary key or
	 * if needed, sequential scan.
	 */
	idxoid = GetRelationIdentityOrPK(rel->localrel);
	Assert(OidIsValid(idxoid) ||
		   (rel->remoterel.replident == REPLICA_IDENTITY_FULL && has_oldtup));

	if (OidIsValid(idxoid))
		found = RelationFindReplTupleByIndex(rel->localrel, idxoid,
											 LockTupleExclusive,
											 remoteslot, localslot);
	else
		found = RelationFindReplTupleSeq(rel->localrel, LockTupleExclusive,
										 remoteslot, localslot);

	ExecClearTuple(remoteslot);

	/*
	 * Tuple found.
	 *
	 * Note this will fail if there are other conflicting unique indexes.
	 */
	if (found)
	{
		/* Process and store remote tuple in the slot */
		oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
		ExecStoreTuple(localslot->tts_tuple, remoteslot, InvalidBuffer, false);
		slot_modify_cstrings(remoteslot, rel, newtup.values, newtup.changed);
		MemoryContextSwitchTo(oldctx);

		EvalPlanQualSetSlot(&epqstate, remoteslot);

		/* Do the actual update. */
		ExecSimpleRelationUpdate(estate, &epqstate, localslot, remoteslot);
	}
	else
	{
		/*
		 * The tuple to be updated could not be found.
		 *
		 * TODO what to do here, change the log level to LOG perhaps?
		 */
		elog(DEBUG1,
//......... part of the code is omitted here .........
Developer: RingsC, Project: postgres, Lines: 101, Source: worker.c


Example 20: RelationFindReplTupleSeq

/*
 * Search the relation 'rel' for tuple using the sequential scan.
 *
 * If a matching tuple is found, lock it with lockmode, fill the slot with its
 * contents, and return true.  Return false otherwise.
 *
 * Note that this stops on the first matching tuple.
 *
 * This can obviously be quite slow on tables that have more than few rows.
 */
bool
RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode,
						 TupleTableSlot *searchslot, TupleTableSlot *outslot)
{
	HeapTuple	scantuple;
	HeapScanDesc scan;
	SnapshotData snap;
	TransactionId xwait;
	bool		found;
	TupleDesc	desc = RelationGetDescr(rel);

	Assert(equalTupleDescs(desc, outslot->tts_tupleDescriptor));

	/* Start an index scan. */
	InitDirtySnapshot(snap);
	scan = heap_beginscan(rel, &snap, 0, NULL);

retry:
	found = false;

	heap_rescan(scan, NULL);

	/* Try to find the tuple */
	while ((scantuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		if (!tuple_equals_slot(desc, scantuple, searchslot))
			continue;

		found = true;
		ExecStoreTuple(scantuple, outslot, InvalidBuffer, false);
		ExecMaterializeSlot(outslot);

		xwait = TransactionIdIsValid(snap.xmin) ?
			snap.xmin : snap.xmax;

		/*
		 * If the tuple is locked, wait for locking transaction to finish and
		 * retry.
		 */
		if (TransactionIdIsValid(xwait))
		{
			XactLockTableWait(xwait, NULL, NULL, XLTW_None);
			goto retry;
		}
	}

	/* Found tuple, try to lock it in the lockmode. */
	if (found)
	{
		Buffer		buf;
		HeapUpdateFailureData hufd;
		HTSU_Result res;
		HeapTupleData locktup;

		ItemPointerCopy(&outslot->tts_tuple->t_self, &locktup.t_self);

		PushActiveSnapshot(GetLatestSnapshot());

		res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
							  lockmode,
							  LockWaitBlock,
							  false /* don't follow updates */ ,
							  &buf, &hufd);
		/* the tuple slot already has the buffer pinned */
		ReleaseBuffer(buf);

		PopActiveSnapshot();

		switch (res)
		{
			case HeapTupleMayBeUpdated:
				break;
			case HeapTupleUpdated:
				/* XXX: Improve handling here */
				ereport(LOG,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("concurrent update, retrying")));
				goto retry;
			case HeapTupleInvisible:
				elog(ERROR, "attempted to lock invisible tuple");
			default:
				elog(ERROR, "unexpected heap_lock_tuple status: %u", res);
				break;
		}
	}

	heap_endscan(scan);

	return found;
}
Developer: timmui, Project: postgres, Lines: 100, Source: execReplication.c



Note: The ExecStoreTuple function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.

