This article collects typical usage examples of the Java class org.apache.cassandra.repair.Validator. If you are wondering what the Validator class does, how to use it, or where to find examples of it, the curated class examples below may help.
The Validator class belongs to the org.apache.cassandra.repair package. Eight code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
Example 1: submitValidation
import org.apache.cassandra.repair.Validator; // import the required package/class
/**
 * Does not mutate data, so is not scheduled.
 */
public Future<Object> submitValidation(final ColumnFamilyStore cfStore, final Validator validator)
{
    Callable<Object> callable = new Callable<Object>()
    {
        public Object call() throws IOException
        {
            try
            {
                doValidationCompaction(cfStore, validator);
            }
            catch (Throwable e)
            {
                // we need to inform the remote end of our failure, otherwise it will hang on repair forever
                validator.fail();
                throw e;
            }
            return this;
        }
    };
    return validationExecutor.submit(callable);
}
Developer: vcostet, Project: cassandra-kmean, Lines: 25, Source: CompactionManager.java
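For context, here is a minimal sketch of how this method gets called. This is hypothetical surrounding code: it assumes a RepairJobDesc (desc), the target ColumnFamilyStore (cfs), and a gcBefore value are already in scope; Examples 4 and 6 below show complete test setups that follow this same pattern.

Validator validator = new Validator(desc, FBUtilities.getBroadcastAddress(), gcBefore);
Future<Object> validation = CompactionManager.instance.submitValidation(cfs, validator);
validation.get(); // blocks until the validation finishes; a failure surfaces as an ExecutionException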
Example 2: testValidationCompleteWrite
import org.apache.cassandra.repair.Validator; // import the required package/class
private void testValidationCompleteWrite() throws IOException
{
    // empty validation
    Validator v0 = new Validator(DESC, FBUtilities.getBroadcastAddress(), -1);
    ValidationComplete c0 = new ValidationComplete(DESC, v0.tree);

    // validation with a tree
    IPartitioner p = new RandomPartitioner();
    MerkleTree mt = new MerkleTree(p, FULL_RANGE, MerkleTree.RECOMMENDED_DEPTH, Integer.MAX_VALUE);
    for (int i = 0; i < 10; i++)
        mt.split(p.getRandomToken());
    Validator v1 = new Validator(DESC, FBUtilities.getBroadcastAddress(), mt, -1);
    ValidationComplete c1 = new ValidationComplete(DESC, v1.tree);

    // validation failed
    ValidationComplete c3 = new ValidationComplete(DESC);

    testRepairMessageWrite("service.ValidationComplete.bin", c0, c1, c3);
}
Developer: pgaref, Project: ACaZoo, Lines: 20, Source: SerializationsTest.java
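Worth noting: this test exercises all three shapes a ValidationComplete message can take on the wire: a trivially small tree from a validation over no data (c0), a tree populated by ten random splits (c1), and the tree-less form used to signal a failed validation (c3).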
Example 3: testValidationCompleteWrite
import org.apache.cassandra.repair.Validator; // import the required package/class
private void testValidationCompleteWrite() throws IOException
{
    IPartitioner p = RandomPartitioner.instance;
    MerkleTrees mt = new MerkleTrees(p);

    // empty validation
    mt.addMerkleTree((int) Math.pow(2, 15), FULL_RANGE);
    Validator v0 = new Validator(DESC, FBUtilities.getBroadcastAddress(), -1);
    ValidationComplete c0 = new ValidationComplete(DESC, mt);

    // validation with a tree
    mt = new MerkleTrees(p);
    mt.addMerkleTree(Integer.MAX_VALUE, FULL_RANGE);
    for (int i = 0; i < 10; i++)
        mt.split(p.getRandomToken());
    Validator v1 = new Validator(DESC, FBUtilities.getBroadcastAddress(), -1);
    ValidationComplete c1 = new ValidationComplete(DESC, mt);

    // validation failed
    ValidationComplete c3 = new ValidationComplete(DESC);

    testRepairMessageWrite("service.ValidationComplete.bin", c0, c1, c3);
}
Developer: scylladb, Project: scylla-tools-java, Lines: 25, Source: SerializationsTest.java
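Examples 2 and 3 are the same test from two generations of the codebase: the older version serializes a single MerkleTree directly, while the newer one builds a MerkleTrees collection (one tree per token range) and passes that to ValidationComplete, which is why its Validator instances v0 and v1 no longer contribute the tree themselves.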
Example 4: testValidationMultipleSSTablePerLevel
import org.apache.cassandra.repair.Validator; // import the required package/class
@Test
public void testValidationMultipleSSTablePerLevel() throws Exception
{
    byte[] b = new byte[100 * 1024];
    new Random().nextBytes(b);
    ByteBuffer value = ByteBuffer.wrap(b); // 100 KB value, make it easy to have multiple files

    // Enough data to have a level 1 and 2
    int rows = 20;
    int columns = 10;

    // Add enough data to trigger multiple sstables per level
    for (int r = 0; r < rows; r++)
    {
        DecoratedKey key = Util.dk(String.valueOf(r));
        Mutation rm = new Mutation(ksname, key.getKey());
        for (int c = 0; c < columns; c++)
        {
            rm.add(cfname, Util.cellname("column" + c), value, 0);
        }
        rm.apply();
        cfs.forceBlockingFlush();
    }

    waitForLeveling(cfs);
    WrappingCompactionStrategy strategy = (WrappingCompactionStrategy) cfs.getCompactionStrategy();
    // Checking we're not completely bad at math
    assertTrue(strategy.getSSTableCountPerLevel()[1] > 0);
    assertTrue(strategy.getSSTableCountPerLevel()[2] > 0);

    Range<Token> range = new Range<>(Util.token(""), Util.token(""));
    int gcBefore = keyspace.getColumnFamilyStore(cfname).gcBefore(System.currentTimeMillis());
    UUID parentRepSession = UUID.randomUUID();
    ActiveRepairService.instance.registerParentRepairSession(parentRepSession, Arrays.asList(cfs), Arrays.asList(range));
    RepairJobDesc desc = new RepairJobDesc(parentRepSession, UUID.randomUUID(), ksname, cfname, range);
    Validator validator = new Validator(desc, FBUtilities.getBroadcastAddress(), gcBefore);
    CompactionManager.instance.submitValidation(cfs, validator).get();
}
Developer: vcostet, Project: cassandra-kmean, Lines: 39, Source: LeveledCompactionStrategyTest.java
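One idiom worth spelling out: new Range<>(Util.token(""), Util.token("")) creates a range whose left and right bounds are the same minimum token; under Cassandra's wrap-around Range semantics this covers the entire ring, so the validation sees every row the test wrote.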
Example 5: getSSTablesToValidate
import org.apache.cassandra.repair.Validator; // import the required package/class
private synchronized Refs<SSTableReader> getSSTablesToValidate(ColumnFamilyStore cfs, Validator validator)
{
    Refs<SSTableReader> sstables;

    ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(validator.desc.parentSessionId);
    if (prs == null)
        return null;
    Set<SSTableReader> sstablesToValidate = new HashSet<>();
    if (prs.isGlobal)
        prs.markSSTablesRepairing(cfs.metadata.cfId, validator.desc.parentSessionId);
    // note that we always grab all existing sstables for this - if we were to just grab the ones that
    // were marked as repairing, we would miss any ranges that were compacted away and this would cause us to overstream
    try (ColumnFamilyStore.RefViewFragment sstableCandidates = cfs.selectAndReference(View.select(SSTableSet.CANONICAL, (s) -> !prs.isIncremental || !s.isRepaired())))
    {
        for (SSTableReader sstable : sstableCandidates.sstables)
        {
            if (new Bounds<>(sstable.first.getToken(), sstable.last.getToken()).intersects(validator.desc.ranges))
            {
                sstablesToValidate.add(sstable);
            }
        }

        sstables = Refs.tryRef(sstablesToValidate);
        if (sstables == null)
        {
            logger.error("Could not reference sstables");
            throw new RuntimeException("Could not reference sstables");
        }
    }
    return sstables;
}
Developer: scylladb, Project: scylla-tools-java, Lines: 33, Source: CompactionManager.java
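Since Refs<SSTableReader> is a reference-counting container, the caller of getSSTablesToValidate is responsible for releasing the references once the validation scan is done. A minimal sketch of the expected caller pattern (hypothetical surrounding code; it relies on Refs implementing AutoCloseable so that try-with-resources releases the references, and on Java skipping close() for a null resource):

try (Refs<SSTableReader> sstables = getSSTablesToValidate(cfs, validator))
{
    if (sstables == null)
        return; // the parent repair session is gone; nothing to validate
    // ... run the validation scan over the referenced sstables ...
}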
Example 6: testValidationMultipleSSTablePerLevel
import org.apache.cassandra.repair.Validator; // import the required package/class
@Test
public void testValidationMultipleSSTablePerLevel() throws Exception
{
    byte[] b = new byte[100 * 1024];
    new Random().nextBytes(b);
    ByteBuffer value = ByteBuffer.wrap(b); // 100 KB value, make it easy to have multiple files

    // Enough data to have a level 1 and 2
    int rows = 40;
    int columns = 20;

    // Add enough data to trigger multiple sstables per level
    for (int r = 0; r < rows; r++)
    {
        UpdateBuilder update = UpdateBuilder.create(cfs.metadata, String.valueOf(r));
        for (int c = 0; c < columns; c++)
            update.newRow("column" + c).add("val", value);
        update.applyUnsafe();
        cfs.forceBlockingFlush();
    }

    waitForLeveling(cfs);
    CompactionStrategyManager strategy = cfs.getCompactionStrategyManager();
    // Checking we're not completely bad at math
    assertTrue(strategy.getSSTableCountPerLevel()[1] > 0);
    assertTrue(strategy.getSSTableCountPerLevel()[2] > 0);

    Range<Token> range = new Range<>(Util.token(""), Util.token(""));
    int gcBefore = keyspace.getColumnFamilyStore(CF_STANDARDDLEVELED).gcBefore(FBUtilities.nowInSeconds());
    UUID parentRepSession = UUID.randomUUID();
    ActiveRepairService.instance.registerParentRepairSession(parentRepSession, FBUtilities.getBroadcastAddress(), Arrays.asList(cfs), Arrays.asList(range), false, System.currentTimeMillis(), true);
    RepairJobDesc desc = new RepairJobDesc(parentRepSession, UUID.randomUUID(), KEYSPACE1, CF_STANDARDDLEVELED, Arrays.asList(range));
    Validator validator = new Validator(desc, FBUtilities.getBroadcastAddress(), gcBefore);
    CompactionManager.instance.submitValidation(cfs, validator).get();
}
Developer: scylladb, Project: scylla-tools-java, Lines: 36, Source: LeveledCompactionStrategyTest.java
Example 7: doValidationCompaction
import org.apache.cassandra.repair.Validator; // import the required package/class
/**
 * Performs a read-only "compaction" of all sstables in order to validate complete rows,
 * but without writing the merge result.
 */
private void doValidationCompaction(ColumnFamilyStore cfs, Validator validator) throws IOException
{
    // this isn't meant to be race-proof, because it's not -- it won't cause bugs for a CFS to be dropped
    // mid-validation, or to attempt to validate a dropped CFS. this is just a best effort to avoid useless work,
    // particularly in the scenario where a validation is submitted before the drop, and there are compactions
    // started prior to the drop keeping some sstables alive. Since validationCompaction can run
    // concurrently with other compactions, it would otherwise go ahead and scan those again.
    if (!cfs.isValid())
        return;

    Collection<SSTableReader> sstables;
    String snapshotName = validator.desc.sessionId.toString();
    int gcBefore;
    boolean isSnapshotValidation = cfs.snapshotExists(snapshotName);
    if (isSnapshotValidation)
    {
        // If there is a snapshot created for the session, then read from there.
        sstables = cfs.getSnapshotSSTableReader(snapshotName);
        // Computing gcBefore based on the current time wouldn't be very good because we know each replica will execute
        // this at a different time (that's the whole purpose of repair with snapshot). So instead we take the creation
        // time of the snapshot, which should give us roughly the same time on each replica (roughly being in that case
        // 'as good as in the non-snapshot case')
        gcBefore = cfs.gcBefore(cfs.getSnapshotCreationTime(snapshotName));
    }
    else
    {
        // flush first so everyone is validating data that is as similar as possible
        StorageService.instance.forceKeyspaceFlush(cfs.keyspace.getName(), cfs.name);
        // we don't mark validating sstables as compacting in DataTracker, so we have to mark them referenced
        // instead so they won't be cleaned up if they do get compacted during the validation
        sstables = cfs.markCurrentSSTablesReferenced();
        if (validator.gcBefore > 0)
            gcBefore = validator.gcBefore;
        else
            gcBefore = getDefaultGcBefore(cfs);
    }

    CompactionIterable ci = new ValidationCompactionIterable(cfs, sstables, validator.desc.range, gcBefore);
    CloseableIterator<AbstractCompactedRow> iter = ci.iterator();
    metrics.beginCompaction(ci);
    try
    {
        // validate the CF as we iterate over it
        validator.prepare(cfs);
        while (iter.hasNext())
        {
            if (ci.isStopRequested())
                throw new CompactionInterruptedException(ci.getCompactionInfo());
            AbstractCompactedRow row = iter.next();
            validator.add(row);
        }
        validator.complete();
    }
    finally
    {
        iter.close();
        if (isSnapshotValidation)
        {
            for (SSTableReader sstable : sstables)
                FileUtils.closeQuietly(sstable);
            cfs.clearSnapshot(snapshotName);
        }
        else
        {
            SSTableReader.releaseReferences(sstables);
        }
        metrics.finishCompaction(ci);
    }
}
Developer: pgaref, Project: ACaZoo, Lines: 77, Source: CompactionManager.java
Example 8: doValidationCompaction
import org.apache.cassandra.repair.Validator; // import the required package/class
/**
 * Performs a read-only "compaction" of all sstables in order to validate complete rows,
 * but without writing the merge result.
 */
private void doValidationCompaction(ColumnFamilyStore cfs, Validator validator) throws IOException
{
    // this isn't meant to be race-proof, because it's not -- it won't cause bugs for a CFS to be dropped
    // mid-validation, or to attempt to validate a dropped CFS. this is just a best effort to avoid useless work,
    // particularly in the scenario where a validation is submitted before the drop, and there are compactions
    // started prior to the drop keeping some sstables alive. Since validationCompaction can run
    // concurrently with other compactions, it would otherwise go ahead and scan those again.
    if (!cfs.isValid())
        return;

    Collection<SSTableReader> sstables;
    String snapshotName = validator.desc.sessionId.toString();
    int gcBefore;
    boolean isSnapshotValidation = cfs.snapshotExists(snapshotName);
    if (isSnapshotValidation)
    {
        // If there is a snapshot created for the session, then read from there.
        sstables = cfs.getSnapshotSSTableReader(snapshotName);
        // Computing gcBefore based on the current time wouldn't be very good because we know each replica will execute
        // this at a different time (that's the whole purpose of repair with snapshot). So instead we take the creation
        // time of the snapshot, which should give us roughly the same time on each replica (roughly being in that case
        // 'as good as in the non-snapshot case')
        gcBefore = cfs.gcBefore(cfs.getSnapshotCreationTime(snapshotName));
    }
    else
    {
        // flush first so everyone is validating data that is as similar as possible
        StorageService.instance.forceKeyspaceFlush(cfs.keyspace.getName(), cfs.name);
        // we don't mark validating sstables as compacting in DataTracker, so we have to mark them referenced
        // instead so they won't be cleaned up if they do get compacted during the validation
        if (validator.desc.parentSessionId == null || ActiveRepairService.instance.getParentRepairSession(validator.desc.parentSessionId) == null)
            sstables = cfs.markCurrentSSTablesReferenced();
        else
            sstables = ActiveRepairService.instance.getParentRepairSession(validator.desc.parentSessionId).getAndReferenceSSTables(cfs.metadata.cfId);

        if (validator.gcBefore > 0)
            gcBefore = validator.gcBefore;
        else
            gcBefore = getDefaultGcBefore(cfs);
    }

    CompactionIterable ci = new ValidationCompactionIterable(cfs, sstables, validator.desc.range, gcBefore);
    CloseableIterator<AbstractCompactedRow> iter = ci.iterator();
    metrics.beginCompaction(ci);
    try
    {
        // validate the CF as we iterate over it
        validator.prepare(cfs);
        while (iter.hasNext())
        {
            if (ci.isStopRequested())
                throw new CompactionInterruptedException(ci.getCompactionInfo());
            AbstractCompactedRow row = iter.next();
            validator.add(row);
        }
        validator.complete();
    }
    finally
    {
        iter.close();
        SSTableReader.releaseReferences(sstables);
        if (isSnapshotValidation)
        {
            cfs.clearSnapshot(snapshotName);
        }
        metrics.finishCompaction(ci);
    }
}
Developer: rajath26, Project: cassandra-trunk, Lines: 75, Source: CompactionManager.java
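Examples 7 and 8 differ mainly in how the non-snapshot branch acquires its sstables: the older version references whatever is live via cfs.markCurrentSSTablesReferenced(), while the later one first looks up the parent repair session and, when one exists, takes the sstable set that session has already pinned via getAndReferenceSSTables, so every validation in the same repair session works against that shared set. The cleanup moves accordingly: in Example 8 the reference release happens unconditionally in the finally block.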
Note: the org.apache.cassandra.repair.Validator class examples in this article were collected from source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by their authors. Copyright of the source code remains with the original authors; refer to the corresponding project's License before redistributing or reusing it. Do not reproduce without permission.