This article collects typical usage examples of the C++ function ItemIdIsDead. If you have been struggling with questions such as: what exactly does ItemIdIsDead do, and how is it used in C++? Then congratulations — the hand-picked code examples below may be just the help you need.
The following presents 20 code examples of the ItemIdIsDead function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better C++ code examples.
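Before the examples, here is a minimal sketch (not taken from any one example below; the helper name scan_live_items is hypothetical) of the pattern most of them share: walk a page's line pointers with PageGetItemId and skip the ones that ItemIdIsDead reports as dead. The surrounding buffer and page handling is assumed to be in place.
static void
scan_live_items(Page page)
{
	OffsetNumber off;
	OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
	for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off))
	{
		ItemId itemid = PageGetItemId(page, off);
		/* LP_DEAD line pointers carry no usable tuple; skip them */
		if (ItemIdIsDead(itemid))
			continue;
		/* ... process the live item here ... */
	}
}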
Example 1: BTReaderGetNextItem
/**
* @brief Get the next smaller item from the old index
*
* Process flow
* -# Examine the max offset position in the page
* -# Search the next item
* -# If the item is flagged dead, search for the next one
* -# If no more items can be found, read the leaf page on the right side
*    and search again
*
* These members are updated:
* - page   : page containing the picked-up item
* - offnum : offset number of the picked-up item
*
* @param reader [in/out] BTReader structure
* @return next index tuple, or null if no more tuples
*/
static IndexTuple
BTReaderGetNextItem(BTReader *reader)
{
OffsetNumber maxoff;
ItemId itemid;
BTPageOpaque opaque;
/*
* If no leaf page has been read yet, the state is treated as EOF
*/
if (reader->blkno == InvalidBlockNumber)
return NULL;
maxoff = PageGetMaxOffsetNumber(reader->page);
for (;;)
{
/*
* If no item has been picked up yet, offnum is set to InvalidOffsetNumber.
*/
if (reader->offnum == InvalidOffsetNumber)
{
opaque = (BTPageOpaque) PageGetSpecialPointer(reader->page);
reader->offnum = P_FIRSTDATAKEY(opaque);
}
else
reader->offnum = OffsetNumberNext(reader->offnum);
if (reader->offnum <= maxoff)
{
itemid = PageGetItemId(reader->page, reader->offnum);
/* Ignore dead items */
if (ItemIdIsDead(itemid))
continue;
return (IndexTuple) PageGetItem(reader->page, itemid);
}
else
{
/* The end of the leaf page. Go right. */
opaque = (BTPageOpaque) PageGetSpecialPointer(reader->page);
if (P_RIGHTMOST(opaque))
return NULL; /* No more index tuples */
BTReaderReadPage(reader, opaque->btpo_next);
maxoff = PageGetMaxOffsetNumber(reader->page);
}
}
}
Developer ID: chuongnn, Project: pg_bulkload, Line count: 68, Source file: pg_btree.c
Example 2: hashgetbitmap
/*
* hashgetbitmap() -- get all tuples at once
*/
Datum
hashgetbitmap(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
TIDBitmap *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
HashScanOpaque so = (HashScanOpaque) scan->opaque;
bool res;
int64 ntids = 0;
res = _hash_first(scan, ForwardScanDirection);
while (res)
{
bool add_tuple;
/*
* Skip killed tuples if asked to.
*/
if (scan->ignore_killed_tuples)
{
Page page;
OffsetNumber offnum;
offnum = ItemPointerGetOffsetNumber(&(so->hashso_curpos));
page = BufferGetPage(so->hashso_curbuf);
add_tuple = !ItemIdIsDead(PageGetItemId(page, offnum));
}
else
add_tuple = true;
/* Save tuple ID, and continue scanning */
if (add_tuple)
{
/* Note we mark the tuple ID as requiring recheck */
tbm_add_tuples(tbm, &(so->hashso_heappos), 1, true);
ntids++;
}
res = _hash_next(scan, ForwardScanDirection);
}
PG_RETURN_INT64(ntids);
}
Developer ID: amulsul, Project: postgres, Line count: 46, Source file: hash.c
Example 3: hashgetbitmap
/*
* hashgetbitmap() -- get all tuples at once
*/
int64
hashgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
{
HashScanOpaque so = (HashScanOpaque) scan->opaque;
bool res;
int64 ntids = 0;
res = _hash_first(scan, ForwardScanDirection);
while (res)
{
bool add_tuple;
/*
* Skip killed tuples if asked to.
*/
if (scan->ignore_killed_tuples)
{
Page page;
OffsetNumber offnum;
offnum = ItemPointerGetOffsetNumber(&(so->hashso_curpos));
page = BufferGetPage(so->hashso_curbuf, NULL, NULL,
BGP_NO_SNAPSHOT_TEST);
add_tuple = !ItemIdIsDead(PageGetItemId(page, offnum));
}
else
add_tuple = true;
/* Save tuple ID, and continue scanning */
if (add_tuple)
{
/* Note we mark the tuple ID as requiring recheck */
tbm_add_tuples(tbm, &(so->hashso_heappos), 1, true);
ntids++;
}
res = _hash_next(scan, ForwardScanDirection);
}
return ntids;
}
Developer ID: Hu1-Li, Project: postgres, Line count: 45, Source file: hash.c
Example 4: istatus_text
static text *
istatus_text(ItemId itemid)
{
StringInfoData buf;
initStringInfo(&buf);
if (ItemIdDeleted(itemid))
appendStringInfoString(&buf, "DELETED ");
if (ItemIdIsNormal(itemid))
appendStringInfoString(&buf, "USED ");
if (ItemIdIsDead(itemid))
appendStringInfoString(&buf, "DEAD ");
if (buf.len == 0)
appendStringInfoString(&buf, "UNUSED ");
buf.data[buf.len - 1] = '\0';
return cstring_to_text(buf.data);
}
Developer ID: AnLingm, Project: gpdb, Line count: 20, Source file: indexscan.c
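A hedged usage sketch (not part of the original example, and the surrounding page variable is assumed): istatus_text could be called once per line pointer to dump the status of every item on a page; text_to_cstring is the standard PostgreSQL helper for turning the result back into a C string.
	OffsetNumber off;
	OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
	for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off))
	{
		/* Print the status flags (DELETED/USED/DEAD/UNUSED) of each item */
		text	   *status = istatus_text(PageGetItemId(page, off));
		elog(NOTICE, "item %u: %s", off, text_to_cstring(status));
	}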
Example 5: pgstat_index_page
/*
* pgstat_index_page -- for generic index page
*/
static void
pgstat_index_page(pgstattuple_type *stat, Page page,
OffsetNumber minoff, OffsetNumber maxoff)
{
OffsetNumber i;
stat->free_space += PageGetFreeSpace(page);
for (i = minoff; i <= maxoff; i = OffsetNumberNext(i))
{
ItemId itemid = PageGetItemId(page, i);
if (ItemIdIsDead(itemid))
{
stat->dead_tuple_count++;
stat->dead_tuple_len += ItemIdGetLength(itemid);
}
else
{
stat->tuple_count++;
stat->tuple_len += ItemIdGetLength(itemid);
}
}
}
Developer ID: markwkm, Project: postgres, Line count: 27, Source file: pgstattuple.c
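A hedged sketch of how this helper is typically driven (loosely modeled on pgstattuple's own btree handling; the buf and stat variables are assumed): for a btree leaf page, the caller passes the first data key and the page's max offset.
	Page		page = BufferGetPage(buf);
	BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	/* Only leaf pages hold index tuples worth counting */
	if (P_ISLEAF(opaque))
		pgstat_index_page(stat, page, P_FIRSTDATAKEY(opaque),
						  PageGetMaxOffsetNumber(page));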
Example 6: heap_page_prune
/*
* Prune and repair fragmentation in the specified page.
*
* Caller must have pin and buffer cleanup lock on the page.
*
* OldestXmin is the cutoff XID used to distinguish whether tuples are DEAD
* or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum).
*
* If redirect_move is set, we remove redirecting line pointers by
* updating the root line pointer to point directly to the first non-dead
* tuple in the chain. NOTE: eliminating the redirect changes the first
* tuple's effective CTID, and is therefore unsafe except within VACUUM FULL.
* The only reason we support this capability at all is that by using it,
* VACUUM FULL need not cope with LP_REDIRECT items at all; which seems a
* good thing since VACUUM FULL is overly complicated already.
*
* If report_stats is true then we send the number of reclaimed heap-only
* tuples to pgstats. (This must be FALSE during vacuum, since vacuum will
* send its own new total to pgstats, and we don't want this delta applied
* on top of that.)
*
* Returns the number of tuples deleted from the page.
*/
int
heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
bool redirect_move, bool report_stats)
{
int ndeleted = 0;
Page page = BufferGetPage(buffer);
OffsetNumber offnum,
maxoff;
PruneState prstate;
/*
* Our strategy is to scan the page and make lists of items to change,
* then apply the changes within a critical section. This keeps as much
* logic as possible out of the critical section, and also ensures that
* WAL replay will work the same as the normal case.
*
* First, inform inval.c that upcoming CacheInvalidateHeapTuple calls are
* nontransactional.
*/
if (redirect_move)
BeginNonTransactionalInvalidation();
/*
* Initialize the new pd_prune_xid value to zero (indicating no prunable
* tuples). If we find any tuples which may soon become prunable, we will
* save the lowest relevant XID in new_prune_xid. Also initialize the rest
* of our working state.
*/
prstate.new_prune_xid = InvalidTransactionId;
prstate.nredirected = prstate.ndead = prstate.nunused = 0;
memset(prstate.marked, 0, sizeof(prstate.marked));
/* Scan the page */
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
ItemId itemid;
/* Ignore items already processed as part of an earlier chain */
if (prstate.marked[offnum])
continue;
/* Nothing to do if slot is empty or already dead */
itemid = PageGetItemId(page, offnum);
if (!ItemIdIsUsed(itemid) || ItemIdIsDead(itemid))
continue;
/* Process this item or chain of items */
ndeleted += heap_prune_chain(relation, buffer, offnum,
OldestXmin,
&prstate,
redirect_move);
}
/*
* Send invalidation messages for any tuples we are about to move. It is
* safe to do this now, even though we could theoretically still fail
* before making the actual page update, because a useless cache
* invalidation doesn't hurt anything. Also, no one else can reload the
* tuples while we have exclusive buffer lock, so it's not too early to
* send the invals. This avoids sending the invals while inside the
* critical section, which is a good thing for robustness.
*/
if (redirect_move)
EndNonTransactionalInvalidation();
/* Any error while applying the changes is critical */
START_CRIT_SECTION();
/* Have we found any prunable items? */
if (prstate.nredirected > 0 || prstate.ndead > 0 || prstate.nunused > 0)
{
/*
* Apply the planned item changes, then repair page fragmentation, and
* update the page's hint bit about whether it has free line pointers.
//......... some of the code is omitted here .........
Developer ID: PengJi, Project: gpdb-comments, Line count: 101, Source file: pruneheap.c
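A hedged caller sketch (loosely modeled on lazy vacuum, not taken from the example above; relation, buffer, and OldestXmin are assumed): pruning requires a pin plus the buffer cleanup lock, and this older signature takes the redirect_move and report_stats flags described in the header comment.
	if (ConditionalLockBufferForCleanup(buffer))
	{
		int			ndeleted;
		ndeleted = heap_page_prune(relation, buffer, OldestXmin,
								   false,	/* redirect_move: VACUUM FULL only */
								   true);	/* report reclaimed tuples to pgstats */
		elog(DEBUG2, "pruned %d tuples", ndeleted);
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
	}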
Example 7: hashgettuple
//......... some of the code is omitted here .........
/*
* If we've already initialized this scan, we can just advance it in the
* appropriate direction. If we haven't done so yet, we call a routine to
* get the first item in the scan.
*/
current = &(so->hashso_curpos);
if (ItemPointerIsValid(current))
{
/*
* An insertion into the current index page could have happened while
* we didn't have read lock on it. Re-find our position by looking
* for the TID we previously returned. (Because we hold a pin on the
* primary bucket page, no deletions or splits could have occurred;
* therefore we can expect that the TID still exists in the current
* index page, at an offset >= where we were.)
*/
OffsetNumber maxoffnum;
buf = so->hashso_curbuf;
Assert(BufferIsValid(buf));
page = BufferGetPage(buf);
/*
* We don't need test for old snapshot here as the current buffer is
* pinned, so vacuum can't clean the page.
*/
maxoffnum = PageGetMaxOffsetNumber(page);
for (offnum = ItemPointerGetOffsetNumber(current);
offnum <= maxoffnum;
offnum = OffsetNumberNext(offnum))
{
IndexTuple itup;
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
if (ItemPointerEquals(&(so->hashso_heappos), &(itup->t_tid)))
break;
}
if (offnum > maxoffnum)
elog(ERROR, "failed to re-find scan position within index \"%s\"",
RelationGetRelationName(rel));
ItemPointerSetOffsetNumber(current, offnum);
/*
* Check to see if we should kill the previously-fetched tuple.
*/
if (scan->kill_prior_tuple)
{
/*
* Yes, so remember it for later. (We'll deal with all such tuples
* at once right after leaving the index page or at end of scan.)
* If the caller reverses the indexscan direction, it is quite
* possible that the same item might get entered multiple times.
* But, we don't detect that; instead, we just forget any excess
* entries.
*/
if (so->killedItems == NULL)
so->killedItems = palloc(MaxIndexTuplesPerPage *
sizeof(HashScanPosItem));
if (so->numKilled < MaxIndexTuplesPerPage)
{
so->killedItems[so->numKilled].heapTid = so->hashso_heappos;
so->killedItems[so->numKilled].indexOffset =
ItemPointerGetOffsetNumber(&(so->hashso_curpos));
so->numKilled++;
}
}
/*
* Now continue the scan.
*/
res = _hash_next(scan, dir);
}
else
res = _hash_first(scan, dir);
/*
* Skip killed tuples if asked to.
*/
if (scan->ignore_killed_tuples)
{
while (res)
{
offnum = ItemPointerGetOffsetNumber(current);
page = BufferGetPage(so->hashso_curbuf);
if (!ItemIdIsDead(PageGetItemId(page, offnum)))
break;
res = _hash_next(scan, dir);
}
}
/* Release read lock on current buffer, but keep it pinned */
if (BufferIsValid(so->hashso_curbuf))
LockBuffer(so->hashso_curbuf, BUFFER_LOCK_UNLOCK);
/* Return current heap TID on success */
scan->xs_ctup.t_self = so->hashso_heappos;
return res;
}
Developer ID: BertrandAreal, Project: postgres, Line count: 101, Source file: hash.c
Example 8: btree_xlog_delete_get_latestRemovedXid
//......... some of the code is omitted here .........
if (!reachedConsistency)
elog(PANIC, "btree_xlog_delete_get_latestRemovedXid: cannot operate with inconsistent data");
/*
* Get index page. If the DB is consistent, this should not fail, nor
* should any of the heap page fetches below. If one does, we return
* InvalidTransactionId to cancel all HS transactions. That's probably
* overkill, but it's safe, and certainly better than panicking here.
*/
XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
ibuffer = XLogReadBufferExtended(rnode, MAIN_FORKNUM, blkno, RBM_NORMAL);
if (!BufferIsValid(ibuffer))
return InvalidTransactionId;
LockBuffer(ibuffer, BT_READ);
ipage = (Page) BufferGetPage(ibuffer);
/*
* Loop through the deleted index items to obtain the TransactionId from
* the heap items they point to.
*/
unused = (OffsetNumber *) ((char *) xlrec + SizeOfBtreeDelete);
for (i = 0; i < xlrec->nitems; i++)
{
/*
* Identify the index tuple about to be deleted
*/
iitemid = PageGetItemId(ipage, unused[i]);
itup = (IndexTuple) PageGetItem(ipage, iitemid);
/*
* Locate the heap page that the index tuple points at
*/
hblkno = ItemPointerGetBlockNumber(&(itup->t_tid));
hbuffer = XLogReadBufferExtended(xlrec->hnode, MAIN_FORKNUM, hblkno, RBM_NORMAL);
if (!BufferIsValid(hbuffer))
{
UnlockReleaseBuffer(ibuffer);
return InvalidTransactionId;
}
LockBuffer(hbuffer, BUFFER_LOCK_SHARE);
hpage = (Page) BufferGetPage(hbuffer);
/*
* Look up the heap tuple header that the index tuple points at by
* using the heap node supplied with the xlrec. We can't use
* heap_fetch, since it uses ReadBuffer rather than XLogReadBuffer.
* Note that we are not looking at tuple data here, just headers.
*/
hoffnum = ItemPointerGetOffsetNumber(&(itup->t_tid));
hitemid = PageGetItemId(hpage, hoffnum);
/*
* Follow any redirections until we find something useful.
*/
while (ItemIdIsRedirected(hitemid))
{
hoffnum = ItemIdGetRedirect(hitemid);
hitemid = PageGetItemId(hpage, hoffnum);
CHECK_FOR_INTERRUPTS();
}
/*
* If the heap item has storage, then read the header and use that to
* set latestRemovedXid.
*
* Some LP_DEAD items may not be accessible, so we ignore them.
*/
if (ItemIdHasStorage(hitemid))
{
htuphdr = (HeapTupleHeader) PageGetItem(hpage, hitemid);
HeapTupleHeaderAdvanceLatestRemovedXid(htuphdr, &latestRemovedXid);
}
else if (ItemIdIsDead(hitemid))
{
/*
* Conjecture: if hitemid is dead then it had xids before the xids
* marked on LP_NORMAL items. So we just ignore this item and move
* onto the next, for the purposes of calculating
* latestRemovedXid.
*/
}
else
Assert(!ItemIdIsUsed(hitemid));
UnlockReleaseBuffer(hbuffer);
}
UnlockReleaseBuffer(ibuffer);
/*
* If all heap tuples were LP_DEAD then we will be returning
* InvalidTransactionId here, which avoids conflicts. This matches
* existing logic which assumes that LP_DEAD tuples must already be older
* than the latestRemovedXid on the cleanup record that set them as
* LP_DEAD, hence must already have generated a conflict.
*/
return latestRemovedXid;
}
Developer ID: JiannengSun, Project: postgres, Line count: 101, Source file: nbtxlog.c
Example 9: _hash_splitbucket
/*
* _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
*
* We are splitting a bucket that consists of a base bucket page and zero
* or more overflow (bucket chain) pages. We must relocate tuples that
* belong in the new bucket, and compress out any free space in the old
* bucket.
*
* The caller must hold exclusive locks on both buckets to ensure that
* no one else is trying to access them (see README).
*
* The caller must hold a pin, but no lock, on the metapage buffer.
* The buffer is returned in the same state. (The metapage is only
* touched if it becomes necessary to add or remove overflow pages.)
*
* In addition, the caller must have created the new bucket's base page,
* which is passed in buffer nbuf, pinned and write-locked. That lock and
* pin are released here. (The API is set up this way because we must do
* _hash_getnewbuf() before releasing the metapage write lock. So instead of
* passing the new bucket's start block number, we pass an actual buffer.)
*/
static void
_hash_splitbucket(Relation rel,
Buffer metabuf,
Bucket obucket,
Bucket nbucket,
BlockNumber start_oblkno,
Buffer nbuf,
uint32 maxbucket,
uint32 highmask,
uint32 lowmask)
{
Buffer obuf;
Page opage;
Page npage;
HashPageOpaque oopaque;
HashPageOpaque nopaque;
/*
* It should be okay to simultaneously write-lock pages from each bucket,
* since no one else can be trying to acquire buffer lock on pages of
* either bucket.
*/
obuf = _hash_getbuf(rel, start_oblkno, HASH_WRITE, LH_BUCKET_PAGE);
opage = BufferGetPage(obuf);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
npage = BufferGetPage(nbuf);
/* initialize the new bucket's primary page */
nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
nopaque->hasho_prevblkno = InvalidBlockNumber;
nopaque->hasho_nextblkno = InvalidBlockNumber;
nopaque->hasho_bucket = nbucket;
nopaque->hasho_flag = LH_BUCKET_PAGE;
nopaque->hasho_page_id = HASHO_PAGE_ID;
/*
* Partition the tuples in the old bucket between the old bucket and the
* new bucket, advancing along the old bucket's overflow bucket chain and
* adding overflow pages to the new bucket as needed. Outer loop iterates
* once per page in old bucket.
*/
for (;;)
{
BlockNumber oblkno;
OffsetNumber ooffnum;
OffsetNumber omaxoffnum;
OffsetNumber deletable[MaxOffsetNumber];
int ndeletable = 0;
/* Scan each tuple in old page */
omaxoffnum = PageGetMaxOffsetNumber(opage);
for (ooffnum = FirstOffsetNumber;
ooffnum <= omaxoffnum;
ooffnum = OffsetNumberNext(ooffnum))
{
IndexTuple itup;
Size itemsz;
Bucket bucket;
/* skip dead tuples */
if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
continue;
/*
* Fetch the item's hash key (conveniently stored in the item) and
* determine which bucket it now belongs in.
*/
itup = (IndexTuple) PageGetItem(opage,
PageGetItemId(opage, ooffnum));
bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
maxbucket, highmask, lowmask);
if (bucket == nbucket)
{
/*
* insert the tuple into the new bucket. if it doesn't fit on
* the current page in the new bucket, we must allocate a new
* overflow page and place the tuple on that page instead.
//......... some of the code is omitted here .........
Developer ID: johto, Project: postgres, Line count: 101, Source file: hashpage.c
Example 10: hashgettuple
/*
* hashgettuple() -- Get the next tuple in the scan.
*/
bool
hashgettuple(IndexScanDesc scan, ScanDirection dir)
{
HashScanOpaque so = (HashScanOpaque) scan->opaque;
Relation rel = scan->indexRelation;
Buffer buf;
Page page;
OffsetNumber offnum;
ItemPointer current;
bool res;
/* Hash indexes are always lossy since we store only the hash code */
scan->xs_recheck = true;
/*
* We hold pin but not lock on current buffer while outside the hash AM.
* Reacquire the read lock here.
*/
if (BufferIsValid(so->hashso_curbuf))
_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
/*
* If we've already initialized this scan, we can just advance it in the
* appropriate direction. If we haven't done so yet, we call a routine to
* get the first item in the scan.
*/
current = &(so->hashso_curpos);
if (ItemPointerIsValid(current))
{
/*
* An insertion into the current index page could have happened while
* we didn't have read lock on it. Re-find our position by looking
* for the TID we previously returned. (Because we hold share lock on
* the bucket, no deletions or splits could have occurred; therefore
* we can expect that the TID still exists in the current index page,
* at an offset >= where we were.)
*/
OffsetNumber maxoffnum;
buf = so->hashso_curbuf;
Assert(BufferIsValid(buf));
page = BufferGetPage(buf);
TestForOldSnapshot(scan->xs_snapshot, rel, page);
maxoffnum = PageGetMaxOffsetNumber(page);
for (offnum = ItemPointerGetOffsetNumber(current);
offnum <= maxoffnum;
offnum = OffsetNumberNext(offnum))
{
IndexTuple itup;
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
if (ItemPointerEquals(&(so->hashso_heappos), &(itup->t_tid)))
break;
}
if (offnum > maxoffnum)
elog(ERROR, "failed to re-find scan position within index \"%s\"",
RelationGetRelationName(rel));
ItemPointerSetOffsetNumber(current, offnum);
/*
* Check to see if we should kill the previously-fetched tuple.
*/
if (scan->kill_prior_tuple)
{
/*
* Yes, so mark it by setting the LP_DEAD state in the item flags.
*/
ItemIdMarkDead(PageGetItemId(page, offnum));
/*
* Since this can be redone later if needed, mark as a hint.
*/
MarkBufferDirtyHint(buf, true);
}
/*
* Now continue the scan.
*/
res = _hash_next(scan, dir);
}
else
res = _hash_first(scan, dir);
/*
* Skip killed tuples if asked to.
*/
if (scan->ignore_killed_tuples)
{
while (res)
{
offnum = ItemPointerGetOffsetNumber(current);
page = BufferGetPage(so->hashso_curbuf);
if (!ItemIdIsDead(PageGetItemId(page, offnum)))
break;
res = _hash_next(scan, dir);
}
}
//......... some of the code is omitted here .........
Developer ID: Gordiychuk, Project: postgres, Line count: 101, Source file: hash.c
Example 11: gistnext
//......... some of the code is omitted here .........
while( ntids < maxtids && so->curPageData < so->nPageData )
{
tids[ ntids ] = scan->xs_ctup.t_self =
so->pageData[ so->curPageData ].heapPtr;
ItemPointerSet(&(so->curpos),
BufferGetBlockNumber(so->curbuf),
so->pageData[ so->curPageData ].pageOffset);
so->curPageData ++;
ntids++;
}
if ( ntids == maxtids )
{
LockBuffer(so->curbuf, GIST_UNLOCK);
MIRROREDLOCK_BUFMGR_UNLOCK;
// -------- MirroredLock ----------
return ntids;
}
/*
* We ran out of matching index entries on the current page,
* so pop the top stack entry and use it to continue the
* search.
*/
LockBuffer(so->curbuf, GIST_UNLOCK);
stk = so->stack->next;
pfree(so->stack);
so->stack = stk;
/* If we're out of stack entries, we're done */
if (so->stack == NULL)
{
ReleaseBuffer(so->curbuf);
so->curbuf = InvalidBuffer;
MIRROREDLOCK_BUFMGR_UNLOCK;
// -------- MirroredLock ----------
return ntids;
}
so->curbuf = ReleaseAndReadBuffer(so->curbuf,
scan->indexRelation,
stk->block);
/* XXX go up */
break;
}
if (GistPageIsLeaf(p))
{
/*
* We've found a matching index entry in a leaf page, so
* return success. Note that we keep "curbuf" pinned so that
* we can efficiently resume the index scan later.
*/
if (!(ignore_killed_tuples && ItemIdIsDead(PageGetItemId(p, n))))
{
it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n));
so->pageData[ so->nPageData ].heapPtr = it->t_tid;
so->pageData[ so->nPageData ].pageOffset = n;
so->nPageData ++;
}
}
else
{
/*
* We've found an entry in an internal node whose key is
* consistent with the search key, so push it to stack
*/
stk = (GISTSearchStack *) palloc(sizeof(GISTSearchStack));
it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n));
stk->block = ItemPointerGetBlockNumber(&(it->t_tid));
memset(&(stk->lsn), 0, sizeof(GistNSN));
stk->parentlsn = so->stack->lsn;
stk->next = so->stack->next;
so->stack->next = stk;
}
if (ScanDirectionIsBackward(dir))
n = OffsetNumberPrev(n);
else
n = OffsetNumberNext(n);
}
}
MIRROREDLOCK_BUFMGR_UNLOCK;
// -------- MirroredLock ----------
return ntids;
}
Developer ID: 50wu, Project: gpdb, Line count: 101, Source file: gistget.c
Example 12: _bt_checkkeys
/*
* Test whether an indextuple satisfies all the scankey conditions.
*
* If so, copy its TID into scan->xs_ctup.t_self, and return TRUE.
* If not, return FALSE (xs_ctup is not changed).
*
* If the tuple fails to pass the qual, we also determine whether there's
* any need to continue the scan beyond this tuple, and set *continuescan
* accordingly. See comments for _bt_preprocess_keys(), above, about how
* this is done.
*
* scan: index scan descriptor (containing a search-type scankey)
* page: buffer page containing index tuple
* offnum: offset number of index tuple (must be a valid item!)
* dir: direction we are scanning in
* continuescan: output parameter (will be set correctly in all cases)
*/
bool
_bt_checkkeys(IndexScanDesc scan,
Page page, OffsetNumber offnum,
ScanDirection dir, bool *continuescan)
{
ItemId iid = PageGetItemId(page, offnum);
bool tuple_valid;
IndexTuple tuple;
TupleDesc tupdesc;
BTScanOpaque so;
int keysz;
int ikey;
ScanKey key;
*continuescan = true; /* default assumption */
/*
* If the scan specifies not to return killed tuples, then we treat a
* killed tuple as not passing the qual. Most of the time, it's a win to
* not bother examining the tuple's index keys, but just return
* immediately with continuescan = true to proceed to the next tuple.
* However, if this is the last tuple on the page, we should check the
* index keys to prevent uselessly advancing to the next page.
*/
if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
{
/* return immediately if there are more tuples on the page */
if (ScanDirectionIsForward(dir))
{
if (offnum < PageGetMaxOffsetNumber(page))
return false;
}
else
{
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (offnum > P_FIRSTDATAKEY(opaque))
return false;
}
/*
* OK, we want to check the keys, but we'll return FALSE even if the
* tuple passes the key tests.
*/
tuple_valid = false;
}
else
tuple_valid = true;
tuple = (IndexTuple) PageGetItem(page, iid);
IncrIndexProcessed();
tupdesc = RelationGetDescr(scan->indexRelation);
so = (BTScanOpaque) scan->opaque;
keysz = so->numberOfKeys;
for (key = so->keyData, ikey = 0; ikey < keysz; key++, ikey++)
{
Datum datum;
bool isNull;
Datum test;
/* row-comparison keys need special processing */
if (key->sk_flags & SK_ROW_HEADER)
{
if (_bt_check_rowcompare(key, tuple, tupdesc, dir, continuescan))
continue;
return false;
}
datum = index_getattr(tuple,
key->sk_attno,
tupdesc,
&isNull);
if (key->sk_flags & SK_ISNULL)
{
/* Handle IS NULL tests */
Assert(key->sk_flags & SK_SEARCHNULL);
if (isNull)
continue; /* tuple satisfies this qual */
//......... some of the code is omitted here .........
Developer ID: legendOfZelda, Project: LDV, Line count: 101, Source file: nbtutils.c
Example 13: gistScanPage
//......... some of the code is omitted here .........
pairingheap_add(so->queue, &item->phNode);
MemoryContextSwitchTo(oldcxt);
}
so->nPageData = so->curPageData = 0;
scan->xs_hitup = NULL; /* might point into pageDataCxt */
if (so->pageDataCxt)
MemoryContextReset(so->pageDataCxt);
/*
* We save the LSN of the page as we read it, so that we know whether it is
* safe to apply LP_DEAD hints to the page later. This allows us to drop
* the pin for MVCC scans, which allows vacuum to avoid blocking.
*/
so->curPageLSN = BufferGetLSNAtomic(buffer);
/*
* check all tuples on page
*/
maxoff = PageGetMaxOffsetNumber(page);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
ItemId iid = PageGetItemId(page, i);
IndexTuple it;
bool match;
bool recheck;
bool recheck_distances;
/*
* If the scan specifies not to return killed tuples, then we treat a
* killed tuple as not passing the qual.
*/
if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
continue;
it = (IndexTuple) PageGetItem(page, iid);
/*
* Must call gistindex_keytest in tempCxt, and clean up any leftover
* junk afterward.
*/
oldcxt = MemoryContextSwitchTo(so->giststate->tempCxt);
match = gistindex_keytest(scan, it, page, i,
&recheck, &recheck_distances);
MemoryContextSwitchTo(oldcxt);
MemoryContextReset(so->giststate->tempCxt);
/* Ignore tuple if it doesn't match */
if (!match)
continue;
if (tbm && GistPageIsLeaf(page))
{
/*
* getbitmap scan, so just push heap tuple TIDs into the bitmap
* without worrying about ordering
*/
tbm_add_tuples(tbm, &it->t_tid, 1, recheck);
(*ntids)++;
}
else if (scan->numberOfOrderBys == 0 && GistPageIsLeaf(page))
{
/*
//......... some of the code is omitted here .........
Developer ID: Brar, Project: postgres, Line count: 67, Source file: gistget.c
Example 14: lazy_scan_heap
//......... some of the code is omitted here .........
hastup = false;
prev_dead_count = vacrelstats->num_dead_tuples;
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
ItemId itemid;
itemid = PageGetItemId(page, offnum);
/* Unused items require no processing, but we count 'em */
if (!ItemIdIsUsed(itemid))
{
nunused += 1;
continue;
}
/* Redirect items mustn't be touched */
if (ItemIdIsRedirected(itemid))
{
hastup = true; /* this page won't be truncatable */
continue;
}
ItemPointerSet(&(tuple.t_self), blkno, offnum);
/*
* DEAD item pointers are to be vacuumed normally; but we don't
* count them in tups_vacuumed, else we'd be double-counting (at
* least in the common case where heap_page_prune() just freed up
* a non-HOT tuple).
*/
if (ItemIdIsDead(itemid))
{
lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
continue;
}
Assert(ItemIdIsNormal(itemid));
tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
tuple.t_len = ItemIdGetLength(itemid);
tupgone = false;
switch (HeapTupleSatisfiesVacuum(onerel, tuple.t_data, OldestXmin, buf))
{
case HEAPTUPLE_DEAD:
/*
* Ordinarily, DEAD tuples would have been removed by
* heap_page_prune(), but it's possible that the tuple
* state changed since heap_page_prune() looked. In
* particular an INSERT_IN_PROGRESS tuple could have
* changed to DEAD if the inserter aborted. So this
* cannot be considered an error condition.
*
* If the tuple is HOT-updated then it must only be
* removed by a prune operation; so we keep it just as if
* it were RECENTLY_DEAD. Also, if it's a heap-only
* tuple, we choose to keep it, because it'll be a lot
* cheaper to get rid of it in the next pruning pass than
* to treat it like an indexed tuple.
*/
if (HeapTupleIsHotUpdated(&tuple) ||
//......... some of the code is omitted here .........
Developer ID: phan-pivotal, Project: gpdb, Line count: 67, Source file: vacuumlazy.c
Example 15: _hash_squeezebucket
//......... some of the code is omitted here .........
rpage = BufferGetPage(rbuf);
ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage);
Assert(ropaque->hasho_bucket == bucket);
} while (BlockNumberIsValid(ropaque->hasho_nextblkno));
/*
* squeeze the tuples.
*/
for (;;)
{
OffsetNumber roffnum;
OffsetNumber maxroffnum;
OffsetNumber deletable[MaxOffsetNumber];
IndexTuple itups[MaxIndexTuplesPerPage];
Size tups_size[MaxIndexTuplesPerPage];
OffsetNumber itup_offsets[MaxIndexTuplesPerPage];
uint16 ndeletable = 0;
uint16 nitups = 0;
Size all_tups_size = 0;
int i;
bool retain_pin = false;
readpage:
/* Scan each tuple in "read" page */
maxroffnum = PageGetMaxOffsetNumber(rpage);
for (roffnum = FirstOffsetNumber;
roffnum <= maxroffnum;
roffnum = OffsetNumberNext(roffnum))
{
IndexTuple itup;
Size itemsz;
/* skip dead tuples */
if (ItemIdIsDead(PageGetItemId(rpage, roffnum)))
continue;
itup = (IndexTuple) PageGetItem(rpage,
PageGetItemId(rpage, roffnum));
itemsz = IndexTupleDSize(*itup);
itemsz = MAXALIGN(itemsz);
/*
* Walk up the bucket chain, looking for a page big enough for
* this item and all other accumulated items. Exit if we reach
* the read page.
*/
while (PageGetFreeSpaceForMultipleTuples(wpage, nitups + 1) < (all_tups_size + itemsz))
{
Buffer next_wbuf = InvalidBuffer;
bool tups_moved = false;
Assert(!PageIsEmpty(wpage));
if (wblkno == bucket_blkno)
retain_pin = true;
wblkno = wopaque->hasho_nextblkno;
Assert(BlockNumberIsValid(wblkno));
/* don't need to move to next page if we reached the read page */
if (wblkno != rblkno)
next_wbuf = _hash_getbuf_with_strategy(rel,
wblkno,
HASH_WRITE,
LH_OVERFLOW_PAGE,
bstrategy);
Developer ID: bitnine-oss, Project: agens-graph, Line count: 67, Source file: hashovfl.c
Example 16: statapprox_heap
/*
* This function takes an already open relation and scans its pages,
* skipping those that have the corresponding visibility map bit set.
* For pages we skip, we find the free space from the free space map
* and approximate tuple_len on that basis. For the others, we count
* the exact number of dead tuples etc.
*
* This scan is loosely based on vacuumlazy.c:lazy_scan_heap(), but
* we do not try to avoid skipping single pages.
*/
static void
statapprox_heap(Relation rel, output_type *stat)
{
BlockNumber scanned,
nblocks,
blkno;
Buffer vmbuffer = InvalidBuffer;
BufferAccessStrategy bstrategy;
TransactionId OldestXmin;
uint64 misc_count = 0;
OldestXmin = GetOldestXmin(rel, PROCARRAY_FLAGS_VACUUM);
bstrategy = GetAccessStrategy(BAS_BULKREAD);
nblocks = RelationGetNumberOfBlocks(rel);
scanned = 0;
for (blkno = 0; blkno < nblocks; blkno++)
{
Buffer buf;
Page page;
OffsetNumber offnum,
maxoff;
Size freespace;
CHECK_FOR_INTERRUPTS();
/*
* If the page has only visible tuples, then we can find out the free
* space from the FSM and move on.
*/
if (VM_ALL_VISIBLE(rel, blkno, &vmbuffer))
{
freespace = GetRecordedFreeSpace(rel, blkno);
stat->tuple_len += BLCKSZ - freespace;
stat->free_space += freespace;
continue;
}
buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
RBM_NORMAL, bstrategy);
LockBuffer(buf, BUFFER_LOCK_SHARE);
page = BufferGetPage(buf);
/*
* It's not safe to call PageGetHeapFreeSpace() on new pages, so we
* treat them as being free space for our purposes.
*/
if (!PageIsNew(page))
stat->free_space += PageGetHeapFreeSpace(page);
else
stat->free_space += BLCKSZ - SizeOfPageHeaderData;
if (PageIsNew(page) || PageIsEmpty(page))
{
UnlockReleaseBuffer(buf);
continue;
}
scanned++;
/*
* Look at each tuple on the page and decide whether it's live or
* dead, then count it and its size. Unlike lazy_scan_heap, we can
* afford to ignore problems and special cases.
*/
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
ItemId itemid;
HeapTupleData tuple;
itemid = PageGetItemId(page, offnum);
if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid) ||
ItemIdIsDead(itemid))
{
continue;
}
Assert(ItemIdIsNormal(itemid));
ItemPointerSet(&(tuple.t_self), blkno, offnum);
tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
//......... some of the code is omitted here .........
Developer ID: dreamsxin, Project: postgresql-1, Line count: 101, Source file: pgstatapprox.c
Example 17: heap_prune_chain
//......... some of the code is omitted here .........
/* Some sanity checks */
if (offnum < FirstOffsetNumber || offnum > maxoff)
break;
/* If item is already processed, stop --- it must not be same chain */
if (prstate->marked[offnum])
break;
lp = PageGetItemId(dp, offnum);
/* Unused item obviously isn't part of the chain */
if (!ItemIdIsUsed(lp))
break;
/*
* If we are looking at the redirected root line pointer, jump to the
* first normal tuple in the chain. If we find a redirect somewhere
* else, stop --- it must not be same chain.
*/
if (ItemIdIsRedirected(lp))
{
if (nchain > 0)
break; /* not at start of chain */
chainitems[nchain++] = offnum;
offnum = ItemIdGetRedirect(rootlp);
continue;
}
/*
* Likewise, a dead item pointer can't be part of the chain. (We
* already eliminated the case of dead root tuple outside this
* function.)
*/
if (ItemIdIsDead(lp))
break;
Assert(ItemIdIsNormal(lp));
htup = (HeapTupleHeader) PageGetItem(dp, lp);
/*
* Check the tuple XMIN against prior XMAX, if any
*/
if (TransactionIdIsValid(priorXmax) &&
!TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
break;
/*
* OK, this tuple is indeed a member of the chain.
*/
chainitems[nchain++] = offnum;
/*
* Check tuple's visibility status.
*/
tupdead = recent_dead = false;
switch (HeapTupleSatisfiesVacuum(relation, htup, OldestXmin, buffer))
{
case HEAPTUPLE_DEAD:
tupdead = true;
break;
case HEAPTUPLE_RECENTLY_DEAD:
recent_dead = true;
/*
//......... some of the code is omitted here .........
Developer ID: PengJi, Project: gpdb-comments, Line count: 67, Source file: pruneheap.c
Example 18: heap_get_root_tuples
/*
* For all items in this page, find their respective root line pointers.
* If item k is part of a HOT-chain with root at item j, then we set
* root_offsets[k - 1] = j.
*
* The passed-in root_offsets array must have MaxHeapTuplesPerPage entries.
* We zero out all unused entries.
*
* The function must be called with at least share lock on the buffer, to
* prevent concurrent prune operations.
*
* Note: The information collected here is valid only as long as the caller
* holds a pin on the buffer. Once pin is released, a tuple might be pruned
* and reused by a completely unrelated tuple.
*/
void
heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
{
OffsetNumber offnum,
maxoff;
MemSet(root_offsets, 0, MaxHeapTuplesPerPage * sizeof(OffsetNumber));
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
{
ItemId lp = PageGetItemId(page, offnum);
HeapTupleHeader htup;
OffsetNumber nextoffnum;
TransactionId priorXmax;
/* skip unused and dead items */
if (!ItemIdIsUsed(lp) || ItemIdIsDead(lp))
continue;
if (ItemIdIsNormal(lp))
{
htup = (HeapTupleHeader) PageGetItem(page, lp);
/*
* Check if this tuple is part of a HOT-chain rooted at some other
* tuple. If so, skip it for now; we'll process it when we find
* its root.
*/
if (HeapTupleHeaderIsHeapOnly(htup))
continue;
/*
* This is either a plain tuple or the root of a HOT-chain.
* Remember it in the mapping.
*/
root_offsets[offnum - 1] = offnum;
/* If it's not the start of a HOT-chain, we're done with it */
if (!HeapTupleHeaderIsHotUpdated(htup))
continue;
/* Set up to scan the HOT-chain */
nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
priorXmax = HeapTupleHeaderGetXmax(htup);
}
else
{
/* Must be a redirect item. We do not set its root_offsets entry */
Assert(ItemIdIsRedirected(lp));
/* Set up to scan the HOT-chain */
nextoffnum = ItemIdGetRedirect(lp);
priorXmax = InvalidTransactionId;
}
/*
* Now follow the HOT-chain and collect other tuples in the chain.
*
* Note: Even though this is a nested loop, the complexity of the
* function is O(N) because a tuple in the page should be visited not
* more than twice, once in the outer loop and once in HOT-chain
* chases.
*/
for (;;)
{
lp = PageGetItemId(page, nextoffnum);
/* Check for broken chains */
if (!ItemIdIsNormal(lp))
break;
htup = (HeapTupleHeader) PageGetItem(page, lp);
if (TransactionIdIsValid(priorXmax) &&
!TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(htup)))
break;
/* Remember the root line pointer for this item */
root_offsets[nextoffnum - 1] = offnum;
/* Advance to next chain member */
//......... some of the code is omitted here .........
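A hedged usage sketch to close out (not from the original page; the buf variable is assumed): the caller supplies a root_offsets array of MaxHeapTuplesPerPage entries and, per the header comment of Example 18, must hold at least share lock on the buffer and keep its pin while the mapping is used.
	OffsetNumber root_offsets[MaxHeapTuplesPerPage];
	LockBuffer(buf, BUFFER_LOCK_SHARE);
	heap_get_root_tuples(BufferGetPage(buf), root_offsets);
	/* root_offsets[k - 1] now names the root line pointer of item k (0 if unused) */
	LockBuffer(buf, BUFFER_LOCK_UNLOCK);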