This article collects typical usage examples of the Java class org.apache.cassandra.db.OnDiskAtom. If you are wondering what OnDiskAtom is for, how to use it, or what real-world usage looks like, the curated class examples below should help.
The OnDiskAtom class belongs to the org.apache.cassandra.db package. Twenty code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
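Most of the examples below consume OnDiskAtom through instanceof dispatch: an atom deserialized from an SSTable is either a regular cell (Column in the Cassandra 2.0-era projects, Cell in the 2.1-era ones) or a RangeTombstone marker. The following is a minimal sketch of that dispatch, assuming a Cassandra 2.0-style classpath; the class and helper names are illustrative, not taken from any of the projects below.

import java.util.Iterator;

import org.apache.cassandra.db.Column;
import org.apache.cassandra.db.OnDiskAtom;
import org.apache.cassandra.db.RangeTombstone;

public final class AtomKinds
{
    // Illustrative helper: walk an atom iterator and count regular columns
    // versus range tombstone markers, the two cases the examples below handle.
    public static void countAtoms(Iterator<? extends OnDiskAtom> atoms)
    {
        int columns = 0;
        int rangeTombstones = 0;
        while (atoms.hasNext())
        {
            OnDiskAtom atom = atoms.next();
            if (atom instanceof RangeTombstone)
                rangeTombstones++;
            else if (atom instanceof Column)
                columns++;
        }
        System.out.println(columns + " columns, " + rangeTombstones + " range tombstones");
    }
}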
Example 1: computeNext
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
protected OnDiskAtom computeNext()
{
    if (i++ >= columns)
        return endOfData();

    OnDiskAtom column;
    try
    {
        file.reset(mark);
        column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);
    }
    catch (IOException e)
    {
        throw new CorruptSSTableException(e, file.getPath());
    }

    if (finishColumn.remaining() > 0 && comparator.compare(column.name(), finishColumn) > 0)
        return endOfData();

    mark = file.mark();
    return column;
}
Developer: dprguiuc, Project: Cassandra-Wasef, Lines of code: 22, Source file: SimpleSliceReader.java
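The computeNext()/endOfData() pair above follows Guava's AbstractIterator contract: computeNext() either produces the next element or calls endOfData() to mark the iterator exhausted. A self-contained sketch of the same protocol with plain integers (CountingIterator is an illustrative name):

import com.google.common.collect.AbstractIterator;

public class CountingIterator extends AbstractIterator<Integer>
{
    private final int limit;
    private int next = 0;

    public CountingIterator(int limit)
    {
        this.limit = limit;
    }

    @Override
    protected Integer computeNext()
    {
        if (next >= limit)
            return endOfData();   // signals exhaustion, just like the reader above
        return next++;
    }
}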
Example 2: addColumn
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
protected void addColumn(OnDiskAtom col)
{
    if (reversed)
    {
        /*
         * We put range tombstone markers at the beginning of the range they delete. But for reversed queries,
         * the caller still needs to know about a RangeTombstone before it sees any column that it covers.
         * To make that simple, we keep said tombstones separate and return them all before any column for
         * a given block.
         */
        if (col instanceof RangeTombstone)
            rangeTombstonesReversed.addFirst(col);
        else
            blockColumns.addFirst(col);
    }
    else
    {
        blockColumns.addLast(col);
    }
}
Developer: dprguiuc, Project: Cassandra-Wasef, Lines of code: 21, Source file: IndexedSliceReader.java
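The reversed branch works because addFirst() on a deque turns forward (on-disk) read order into reverse iteration order, while range tombstones are kept in their own deque so they can be returned ahead of the columns they cover. A Cassandra-free sketch of the addFirst() reversal:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

public final class ReverseCollector
{
    // Elements are appended in forward order but come back out reversed,
    // mirroring the addFirst() calls in the reversed branch above.
    public static <T> Deque<T> reverse(List<T> forwardOrder)
    {
        Deque<T> reversed = new ArrayDeque<T>();
        for (T element : forwardOrder)
            reversed.addFirst(element);
        return reversed;
    }
}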
Example 3: map
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
@Override protected void map(AegisthusKey key, AtomWritable value,
        Context context)
        throws IOException, InterruptedException {
    if (currentKey == null) {
        currentKey = key.getKey();
    } else if (!currentKey.equals(key.getKey())) {
        flushCgm(context);
        currentKey = key.getKey();
    }

    OnDiskAtom atom = value.getAtom();
    if (atom == null) {
        LOG.warn("Got null atom for key {}.", cfMetaData.getKeyValidator().compose(key.getKey()));
        return;
    }

    if (atom instanceof Column) {
        cgmBuilder.add((Column) atom);
    } else {
        LOG.error("Non-column atom. {} {}", atom.getClass(), atom);
        throw new IllegalArgumentException("Got a non-column Atom.");
    }
}
Developer: Netflix, Project: aegisthus, Lines of code: 24, Source file: CQLMapper.java
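The mapper relies on its input arriving sorted by key: it buffers atoms for the current row key and flushes the accumulated group (flushCgm) whenever the key changes. A standalone sketch of that key-change flush pattern over "key:value" strings (GroupFlush and its println calls are illustrative stand-ins for the real builder and flush):

import java.util.ArrayList;
import java.util.List;

public final class GroupFlush
{
    // Same "flush when the key changes" shape as the mapper above, applied to
    // a key-sorted list of records encoded as "key:value".
    public static void process(List<String> sortedRecords)
    {
        String currentKey = null;
        List<String> buffer = new ArrayList<String>();
        for (String record : sortedRecords)
        {
            String key = record.substring(0, record.indexOf(':'));
            if (currentKey != null && !currentKey.equals(key))
            {
                System.out.println(currentKey + " -> " + buffer);   // stand-in for flushCgm(context)
                buffer.clear();
            }
            currentKey = key;
            buffer.add(record.substring(record.indexOf(':') + 1));
        }
        if (currentKey != null)
            System.out.println(currentKey + " -> " + buffer);       // flush the final group
    }
}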
Example 4: finalizeReduce
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
public void finalizeReduce() {
    if (currentColumn != null) {
        columns.add(currentColumn);
    }

    // When Cassandra compacts, it removes columns in deleted rows that are
    // older than the row's deletion timestamp. We duplicate that behavior here.
    // If the ETL needs this data at some point we can change this, but the data
    // is only available assuming Cassandra hasn't discarded it yet.
    Iterator<OnDiskAtom> columnIterator = columns.iterator();
    while (columnIterator.hasNext()) {
        OnDiskAtom atom = columnIterator.next();
        if (atom instanceof RangeTombstone) {
            columnIterator.remove();
        } else if (atom instanceof Column && ((Column) atom).timestamp() <= this.deletedAt) {
            columnIterator.remove();
        }
    }
}
Developer: Netflix, Project: aegisthus, Lines of code: 21, Source file: CassSSTableReducer.java
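The purge rule above is: drop range tombstones, and drop any column whose write timestamp is at or before the row-level deletion timestamp, since Cassandra's own compaction would have discarded it anyway. On Java 8+ the same rule can be written more compactly with removeIf; a hedged sketch over a simple stand-in type (TombstonePurge and Entry are illustrative names):

import java.util.List;

public final class TombstonePurge
{
    // Minimal stand-in for a cell: a name plus the write timestamp.
    static final class Entry
    {
        final String name;
        final long timestamp;

        Entry(String name, long timestamp)
        {
            this.name = name;
            this.timestamp = timestamp;
        }
    }

    // Same rule as the reducer above: anything written at or before the row's
    // deletion timestamp is shadowed and removed from the list in place.
    public static void purge(List<Entry> entries, long deletedAt)
    {
        entries.removeIf(e -> e.timestamp <= deletedAt);
    }
}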
Example 5: cleanup
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
@Override
public SSTableIdentityIterator cleanup(SSTableIdentityIterator row)
{
    if (Range.isInRanges(row.getKey().getToken(), ranges))
        return row;

    cfs.invalidateCachedRow(row.getKey());

    if (indexedColumnsInRow != null)
        indexedColumnsInRow.clear();

    while (row.hasNext())
    {
        OnDiskAtom column = row.next();

        if (column instanceof Cell && cfs.indexManager.indexes((Cell) column))
        {
            if (indexedColumnsInRow == null)
                indexedColumnsInRow = new ArrayList<>();

            indexedColumnsInRow.add((Cell) column);
        }
    }

    if (indexedColumnsInRow != null && !indexedColumnsInRow.isEmpty())
    {
        // acquire memtable lock here because secondary index deletion may cause a race. See CASSANDRA-3712
        try (OpOrder.Group opGroup = cfs.keyspace.writeOrder.start())
        {
            cfs.indexManager.deleteFromIndexes(row.getKey(), indexedColumnsInRow, opGroup);
        }
    }

    return null;
}
Developer: vcostet, Project: cassandra-kmean, Lines of code: 35, Source file: CompactionManager.java
Example 6: collateOnDiskAtom
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
public static void collateOnDiskAtom(ColumnFamily returnCF,
                                     List<? extends Iterator<? extends OnDiskAtom>> toCollate,
                                     IDiskAtomFilter filter,
                                     int gcBefore,
                                     long timestamp)
{
    List<Iterator<Cell>> filteredIterators = new ArrayList<>(toCollate.size());
    for (Iterator<? extends OnDiskAtom> iter : toCollate)
        filteredIterators.add(gatherTombstones(returnCF, iter));
    collateColumns(returnCF, filteredIterators, filter, gcBefore, timestamp);
}
Developer: vcostet, Project: cassandra-kmean, Lines of code: 12, Source file: QueryFilter.java
Example 7: IndexedBlockFetcher
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
public IndexedBlockFetcher(long basePosition)
{
    super(-1);
    this.basePosition = basePosition;
    this.prefetched = reversed ? new ArrayDeque<OnDiskAtom>() : null;
    setNextSlice();
}
Developer: dprguiuc, Project: Cassandra-Wasef, Lines of code: 8, Source file: IndexedSliceReader.java
Example 8: SimpleBlockFetcher
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
public SimpleBlockFetcher() throws IOException
{
    // Since we have to deserialize in order and will read all slices, we might as well reverse the slices
    // and behave as if the query was not reversed.
    super(reversed ? slices.length - 1 : 0);

    // We remember when we are within a slice to avoid some comparisons
    boolean inSlice = false;

    OnDiskAtom.Serializer atomSerializer = emptyColumnFamily.getOnDiskSerializer();
    int columns = file.readInt();
    for (int i = 0; i < columns; i++)
    {
        OnDiskAtom column = atomSerializer.deserializeFromSSTable(file, sstable.descriptor.version);

        // col is before slice
        // (If in slice, don't bother checking that until we change slice)
        if (!inSlice && isColumnBeforeSliceStart(column))
            continue;

        // col is within slice
        if (isColumnBeforeSliceFinish(column))
        {
            inSlice = true;
            addColumn(column);
        }
        // col is after slice. more slices?
        else
        {
            inSlice = false;
            if (!setNextSlice())
                break;
        }
    }
}
Developer: dprguiuc, Project: Cassandra-Wasef, Lines of code: 37, Source file: IndexedSliceReader.java
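The inSlice flag turns the loop above into a small state machine over data that is already sorted: skip values before the current slice, collect values inside it, and advance to the next slice once a value falls past its end. The generic sketch below mirrors that structure with integers and [start, finish] pairs; it copies the loop's shape, not necessarily every edge case of the original.

import java.util.ArrayList;
import java.util.List;

public final class SliceScan
{
    // Generic version of the loop above: walk sorted values once, collecting those
    // that fall inside any of the sorted, non-overlapping [start, finish] slices.
    public static List<Integer> scan(int[] sortedValues, int[][] slices)
    {
        List<Integer> hits = new ArrayList<Integer>();
        int slice = 0;
        boolean inSlice = false;
        for (int v : sortedValues)
        {
            if (slice >= slices.length)
                break;

            // value is before the current slice starts
            if (!inSlice && v < slices[slice][0])
                continue;

            // value is within the current slice
            if (v <= slices[slice][1])
            {
                inSlice = true;
                hits.add(v);
            }
            // value is past the current slice: move on to the next one, if any
            else
            {
                inSlice = false;
                slice++;
                // the boundary value itself is not re-checked against the new slice,
                // mirroring the shape of the loop above
            }
        }
        return hits;
    }
}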
Example 9: cleanup
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
@Override
public SSTableIdentityIterator cleanup(SSTableIdentityIterator row)
{
    if (Range.isInRanges(row.getKey().token, ranges))
        return row;

    cfs.invalidateCachedRow(row.getKey());

    if (indexedColumnsInRow != null)
        indexedColumnsInRow.clear();

    while (row.hasNext())
    {
        OnDiskAtom column = row.next();

        if (column instanceof Cell && cfs.indexManager.indexes((Cell) column))
        {
            if (indexedColumnsInRow == null)
                indexedColumnsInRow = new ArrayList<>();

            indexedColumnsInRow.add((Cell) column);
        }
    }

    if (indexedColumnsInRow != null && !indexedColumnsInRow.isEmpty())
    {
        // acquire memtable lock here because secondary index deletion may cause a race. See CASSANDRA-3712
        try (OpOrder.Group opGroup = cfs.keyspace.writeOrder.start())
        {
            cfs.indexManager.deleteFromIndexes(row.getKey(), indexedColumnsInRow, opGroup);
        }
    }

    return null;
}
Developer: rajath26, Project: cassandra-trunk, Lines of code: 35, Source file: CompactionManager.java
Example 10: collateOnDiskAtom
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
public static void collateOnDiskAtom(final ColumnFamily returnCF, List<? extends Iterator<? extends OnDiskAtom>> toCollate, IDiskAtomFilter filter, int gcBefore, long timestamp)
{
    List<Iterator<Cell>> filteredIterators = new ArrayList<Iterator<Cell>>(toCollate.size());
    for (Iterator<? extends OnDiskAtom> iter : toCollate)
        filteredIterators.add(gatherTombstones(returnCF, iter));
    collateColumns(returnCF, filteredIterators, filter, gcBefore, timestamp);
}
Developer: rajath26, Project: cassandra-trunk, Lines of code: 8, Source file: QueryFilter.java
Example 11: addAtom
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
@SuppressWarnings("StatementWithEmptyBody")
public void addAtom(AtomWritable writable) {
    OnDiskAtom atom = writable.getAtom();
    if (atom == null) {
        return;
    }
    atomTotalSize += atom.serializedSizeForSSTable();
    this.tombstoneTracker.update(atom);

    // Right now, we will only keep columns. This works because we will
    // have all the columns a range tombstone applies to when we create
    // a snapshot. This will not be true if we go to partial incremental
    // processing.
    if (atom instanceof Column) {
        Column column = (Column) atom;
        if (this.tombstoneTracker.isDeleted(column)) {
            // If the column is deleted by the range tombstone, just discard
            // it; every other column of the same name will be discarded as
            // well, unless it is later than the range tombstone, in which
            // case the column is out of date anyway.
        } else if (currentColumn == null) {
            currentColumn = column;
        } else if (currentColumn.name().equals(column.name())) {
            if (column.timestamp() > currentColumn.minTimestamp()) {
                currentColumn = column;
            }
        } else {
            columns.add(currentColumn);
            currentColumn = column;
        }
    } else if (atom instanceof RangeTombstone) {
        // We do not include these columns in the output since they are deleted.
    } else {
        String error =
                "Cassandra added a new type " + atom.getClass().getCanonicalName() + " which we do not support";
        throw new IllegalArgumentException(error);
    }
}
Developer: Netflix, Project: aegisthus, Lines of code: 40, Source file: CassSSTableReducer.java
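The branch that compares timestamps implements last-write-wins reconciliation: when two versions of the same column name meet, only the newer write survives. A minimal, Cassandra-free sketch of that rule (Reconcile and Versioned are illustrative names; the real code above compares against currentColumn.minTimestamp()):

public final class Reconcile
{
    // Minimal stand-in for a versioned cell value.
    static final class Versioned
    {
        final String value;
        final long timestamp;

        Versioned(String value, long timestamp)
        {
            this.value = value;
            this.timestamp = timestamp;
        }
    }

    // Last-write-wins: when two versions of the same column meet,
    // keep the one with the higher write timestamp.
    static Versioned reconcile(Versioned current, Versioned incoming)
    {
        return incoming.timestamp > current.timestamp ? incoming : current;
    }
}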
Example 12: deserializeColumns
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
long deserializeColumns(Subscriber<? super AtomWritable> subscriber, byte[] rowKey, long deletedAt,
                        int count,
                        DataInput columns) throws IOException {
    long columnSize = 0;
    int actualColumnCount = 0;

    for (int i = 0; i < count; i++, actualColumnCount++) {
        // deserialize columns
        OnDiskAtom atom = serializer.deserializeFromSSTable(
                columns, ColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE, version
        );
        if (atom == null) {
            // If the atom was null, this was a version without version.hasRowSizeAndColumnCount,
            // so we also have to add the size of the end-of-row marker.
            columnSize += 2;
            break;
        }

        columnSize += atom.serializedSizeForSSTable();
        subscriber.onNext(AtomWritable.createWritable(rowKey, deletedAt, atom));
    }

    // This is a row with no columns; we still create a writable because we want to preserve this information.
    if (actualColumnCount == 0) {
        subscriber.onNext(AtomWritable.createWritable(rowKey, deletedAt, null));
    }

    return columnSize;
}
Developer: Netflix, Project: aegisthus, Lines of code: 28, Source file: SSTableColumnScanner.java
Example 13: createRowWritable
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
public static RowWritable createRowWritable(List<OnDiskAtom> columns, long deletedAt) {
    RowWritable rowWritable = new RowWritable();
    rowWritable.columns = columns;
    rowWritable.deletedAt = deletedAt;

    return rowWritable;
}
Developer: Netflix, Project: aegisthus, Lines of code: 8, Source file: RowWritable.java
Example 14: readFields
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
@Override
public void readFields(DataInput in) throws IOException {
    deletedAt = in.readLong();
    int columnCount = in.readInt();
    columns = Lists.newArrayListWithCapacity(columnCount);
    for (int i = 0; i < columnCount; i++) {
        OnDiskAtom atom = serializer.deserializeFromSSTable(
                in, ColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE, version
        );
        columns.add(atom);
    }
}
Developer: Netflix, Project: aegisthus, Lines of code: 13, Source file: RowWritable.java
Example 15: write
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
@Override
public void write(DataOutput out) throws IOException {
    out.writeLong(deletedAt);
    out.writeInt(columns.size());
    for (OnDiskAtom column : columns) {
        serializer.serializeForSSTable(column, out);
    }
}
Developer: Netflix, Project: aegisthus, Lines of code: 9, Source file: RowWritable.java
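Examples 14 and 15 are the two halves of the Hadoop Writable contract: write() must emit exactly the bytes that readFields() later consumes, so variable-length parts are preceded by a count. A self-contained sketch of that symmetric layout (LongListWritable is an illustrative class, not part of aegisthus):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class LongListWritable implements Writable
{
    private long header;
    private long[] values = new long[0];

    @Override
    public void write(DataOutput out) throws IOException
    {
        out.writeLong(header);
        out.writeInt(values.length);        // count first, so readFields knows how much to read
        for (long v : values)
            out.writeLong(v);
    }

    @Override
    public void readFields(DataInput in) throws IOException
    {
        header = in.readLong();
        int count = in.readInt();
        values = new long[count];
        for (int i = 0; i < count; i++)
            values[i] = in.readLong();
    }
}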
Example 16: testCreateRecordReader
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
@Test
public void testCreateRecordReader() throws Exception {
    InputSplit mockInputSplit = mock(InputSplit.class);
    TaskAttemptContext mockContext = mock(TaskAttemptContext.class);
    SSTableColumnInputFormat inputFormat = new SSTableColumnInputFormat();
    SSTableRecordReader<ByteBuffer, OnDiskAtom> rr =
            inputFormat.createRecordReader(mockInputSplit, mockContext);
    assertNotNull(rr);
    assertTrue(rr instanceof SSTableColumnRecordReader);
}
Developer: Knewton, Project: KassandraMRHelper, Lines of code: 11, Source file: SSTableColumnInputFormatTest.java
Example 17: computeNext
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
protected OnDiskAtom computeNext()
{
    getSubIterator();
    return subIterator.hasNext() ? subIterator.next() : endOfData();
}
Developer: vcostet, Project: cassandra-kmean, Lines of code: 6, Source file: LazyColumnIterator.java
Example 18: next
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
public OnDiskAtom next()
{
    return reader.next();
}
Developer: vcostet, Project: cassandra-kmean, Lines of code: 5, Source file: SSTableSliceIterator.java
Example 19: gatherTombstones
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
/**
 * Given an iterator of on-disk atoms, returns an iterator that filters out the range tombstone
 * markers, adding them to {@code returnCF}, and returns only the normal cells.
 */
public static Iterator<Cell> gatherTombstones(final ColumnFamily returnCF, final Iterator<? extends OnDiskAtom> iter)
{
    return new Iterator<Cell>()
    {
        private Cell next;

        public boolean hasNext()
        {
            if (next != null)
                return true;

            getNext();
            return next != null;
        }

        public Cell next()
        {
            if (next == null)
                getNext();

            assert next != null;
            Cell toReturn = next;
            next = null;
            return toReturn;
        }

        private void getNext()
        {
            while (iter.hasNext())
            {
                OnDiskAtom atom = iter.next();

                if (atom instanceof Cell)
                {
                    next = (Cell)atom;
                    break;
                }
                else
                {
                    returnCF.addAtom(atom);
                }
            }
        }

        public void remove()
        {
            throw new UnsupportedOperationException();
        }
    };
}
Developer: vcostet, Project: cassandra-kmean, Lines of code: 55, Source file: QueryFilter.java
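gatherTombstones is a filtering adapter: it pulls from the underlying atom iterator, diverts range tombstones into the side channel returnCF via addAtom, and hands only cells to the caller. The Cassandra-free sketch below keeps the same shape, diverting negative integers to a side list and yielding the rest (SideChannelIterator is an illustrative name):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

public final class SideChannelIterator implements Iterator<Integer>
{
    private final Iterator<Integer> source;
    private final List<Integer> diverted = new ArrayList<Integer>();
    private Integer next;

    public SideChannelIterator(Iterator<Integer> source)
    {
        this.source = source;
    }

    public boolean hasNext()
    {
        // Pull from the source until we find an element for the caller,
        // diverting the rest into the side list.
        while (next == null && source.hasNext())
        {
            Integer candidate = source.next();
            if (candidate < 0)
                diverted.add(candidate);   // side channel, analogous to returnCF.addAtom(atom)
            else
                next = candidate;
        }
        return next != null;
    }

    public Integer next()
    {
        if (!hasNext())
            throw new NoSuchElementException();
        Integer toReturn = next;
        next = null;
        return toReturn;
    }

    public void remove()
    {
        throw new UnsupportedOperationException();
    }

    public List<Integer> diverted()
    {
        return diverted;
    }
}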
Example 20: getOnDiskIterator
import org.apache.cassandra.db.OnDiskAtom; // import the required package/class
public Iterator<OnDiskAtom> getOnDiskIterator(DataInput in, Descriptor.Version version)
{
    return getOnDiskIterator(in, ColumnSerializer.Flag.LOCAL, Integer.MIN_VALUE, version);
}
Developer: daidong, Project: GraphTrek, Lines of code: 5, Source file: CFMetaData.java
Note: The org.apache.cassandra.db.OnDiskAtom examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective developers; the source code is copyrighted by its original authors, and distribution and use are subject to each project's license. Please do not republish without permission.