This article collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/storage.TestStoreContext. If you have been struggling with questions such as: What exactly does the Golang TestStoreContext function do? How is TestStoreContext used? What does real-world usage look like? — then the hand-picked code examples here may help.
Below are 20 code examples of the TestStoreContext function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Golang code examples.
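Before the individual examples, here is a minimal sketch of the pattern almost all of them share: obtain a test StoreContext, tweak a testing knob, and start a store. This condensed version is for orientation only and is not taken from the repository; the test name and trailing comment are illustrative, while createTestStoreWithContext and rg1 are helpers from CockroachDB's own test files, shown in the examples that follow.

// A minimal usage sketch, assuming createTestStoreWithContext behaves as in
// the examples below. Not an example taken from the repository.
func TestMyStoreBehavior(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Obtain a StoreContext preconfigured for tests...
	sCtx := storage.TestStoreContext()
	// ...and adjust testing knobs before the store is created.
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	// Exercise the store here, e.g. via client.SendWrapped(rg1(store), ...).
	_ = store
}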
Example 1: TestStoreRangeMergeTwoEmptyRanges
// TestStoreRangeMergeTwoEmptyRanges tries to merge two empty ranges together.
func TestStoreRangeMergeTwoEmptyRanges(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	if _, _, err := createSplitRanges(store); err != nil {
		t.Fatal(err)
	}
	// Merge the b range back into the a range.
	args := adminMergeArgs(roachpb.KeyMin)
	_, err := client.SendWrapped(rg1(store), nil, &args)
	if err != nil {
		t.Fatal(err)
	}
	// Verify the merge by looking up keys from both ranges.
	replicaA := store.LookupReplica([]byte("a"), nil)
	replicaB := store.LookupReplica([]byte("c"), nil)
	if !reflect.DeepEqual(replicaA, replicaB) {
		t.Fatalf("ranges were not merged %s!=%s", replicaA, replicaB)
	}
}
Developer: yangxuanjia, Project: cockroach, Lines: 27, Source: client_merge_test.go
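Example 1 (and Example 3 below) calls createSplitRanges, a helper defined elsewhere in client_merge_test.go that is not shown on this page. Based on how its return values are used, a plausible reconstruction looks like the following sketch; the split key "b" and the exact signature are assumptions inferred from the "aaa"/"ccc" keys the tests write on either side.

// Hypothetical reconstruction of the createSplitRanges helper: split the
// first range and return the descriptors of both halves. The split key "b"
// and the signature are assumptions, not the repository's actual code.
func createSplitRanges(store *storage.Store) (*roachpb.RangeDescriptor, *roachpb.RangeDescriptor, error) {
	args := adminSplitArgs(roachpb.KeyMin, roachpb.Key("b"))
	if _, pErr := client.SendWrapped(rg1(store), nil, &args); pErr != nil {
		return nil, nil, pErr.GoError()
	}
	rngA := store.LookupReplica([]byte("a"), nil)
	rngB := store.LookupReplica([]byte("c"), nil)
	return rngA.Desc(), rngB.Desc(), nil
}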
Example 2: TestLeaderAfterSplit
// TestLeaderAfterSplit verifies that a raft group created by a split
// elects a leader without waiting for an election timeout.
func TestLeaderAfterSplit(t *testing.T) {
	defer leaktest.AfterTest(t)()
	storeContext := storage.TestStoreContext()
	storeContext.RaftElectionTimeoutTicks = 1000000
	mtc := &multiTestContext{
		storeContext: &storeContext,
	}
	mtc.Start(t, 3)
	defer mtc.Stop()
	mtc.replicateRange(1, 1, 2)
	leftKey := roachpb.Key("a")
	splitKey := roachpb.Key("m")
	rightKey := roachpb.Key("z")
	splitArgs := adminSplitArgs(roachpb.KeyMin, splitKey)
	if _, pErr := client.SendWrapped(mtc.distSenders[0], nil, &splitArgs); pErr != nil {
		t.Fatal(pErr)
	}
	incArgs := incrementArgs(leftKey, 1)
	if _, pErr := client.SendWrapped(mtc.distSenders[0], nil, &incArgs); pErr != nil {
		t.Fatal(pErr)
	}
	incArgs = incrementArgs(rightKey, 2)
	if _, pErr := client.SendWrapped(mtc.distSenders[0], nil, &incArgs); pErr != nil {
		t.Fatal(pErr)
	}
}
Developer: chzyer-dev, Project: cockroach, Lines: 33, Source: client_split_test.go
Example 3: TestStoreRangeMergeStats
// TestStoreRangeMergeStats starts by splitting a range, then writing random data
// to both sides of the split. It then merges the ranges and verifies the merged
// range has stats consistent with recomputations.
func TestStoreRangeMergeStats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, manual := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	// Split the range.
	aDesc, bDesc, err := createSplitRanges(store)
	if err != nil {
		t.Fatal(err)
	}
	// Write some values left and right of the proposed split key.
	writeRandomDataToRange(t, store, aDesc.RangeID, []byte("aaa"))
	writeRandomDataToRange(t, store, bDesc.RangeID, []byte("ccc"))
	// Get the range stats for both ranges now that we have data.
	var msA, msB enginepb.MVCCStats
	snap := store.Engine().NewSnapshot()
	defer snap.Close()
	if err := engine.MVCCGetRangeStats(context.Background(), snap, aDesc.RangeID, &msA); err != nil {
		t.Fatal(err)
	}
	if err := engine.MVCCGetRangeStats(context.Background(), snap, bDesc.RangeID, &msB); err != nil {
		t.Fatal(err)
	}
	// Stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, aDesc, msA, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range A's stats before split: %v", err)
	}
	if err := verifyRecomputedStats(snap, bDesc, msB, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range B's stats before split: %v", err)
	}
	manual.Increment(100)
	// Merge the b range back into the a range.
	args := adminMergeArgs(roachpb.KeyMin)
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		t.Fatal(err)
	}
	rngMerged := store.LookupReplica(aDesc.StartKey, nil)
	// Get the range stats for the merged range and verify.
	snap = store.Engine().NewSnapshot()
	defer snap.Close()
	var msMerged enginepb.MVCCStats
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rngMerged.RangeID, &msMerged); err != nil {
		t.Fatal(err)
	}
	// Merged stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, rngMerged.Desc(), msMerged, manual.UnixNano()); err != nil {
		t.Errorf("failed to verify range's stats after merge: %v", err)
	}
}
Developer: yangxuanjia, Project: cockroach, Lines: 61, Source: client_merge_test.go
Example 4: TestReplicaGCQueueDropReplicaDirect
// TestReplicaGCQueueDropReplicaDirect verifies that a removed replica is
// immediately cleaned up.
func TestReplicaGCQueueDropReplicaDirect(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := &multiTestContext{}
	const numStores = 3
	rangeID := roachpb.RangeID(1)
	// In this test, the Replica on the second Node is removed, and the test
	// verifies that that Node adds this Replica to its RangeGCQueue. However,
	// the queue does a consistent lookup which will usually be read from
	// Node 1. Hence, if Node 1 hasn't processed the removal when Node 2 has,
	// no GC will take place since the consistent RangeLookup hits the first
	// Node. We use the TestingCommandFilter to make sure that the second Node
	// waits for the first.
	ctx := storage.TestStoreContext()
	mtc.storeContext = &ctx
	mtc.storeContext.TestingKnobs.TestingCommandFilter =
		func(filterArgs storageutils.FilterArgs) error {
			et, ok := filterArgs.Req.(*roachpb.EndTransactionRequest)
			if !ok || filterArgs.Sid != 2 {
				return nil
			}
			rct := et.InternalCommitTrigger.GetChangeReplicasTrigger()
			if rct == nil || rct.ChangeType != roachpb.REMOVE_REPLICA {
				return nil
			}
			util.SucceedsSoon(t, func() error {
				r, err := mtc.stores[0].GetReplica(rangeID)
				if err != nil {
					return err
				}
				if i, _ := r.Desc().FindReplica(2); i >= 0 {
					return errors.New("expected second node gone from first node's known replicas")
				}
				return nil
			})
			return nil
		}
	mtc.Start(t, numStores)
	defer mtc.Stop()
	mtc.replicateRange(rangeID, 1, 2)
	mtc.unreplicateRange(rangeID, 1)
	// Make sure the range is removed from the store.
	util.SucceedsSoon(t, func() error {
		if _, err := mtc.stores[1].GetReplica(rangeID); !testutils.IsError(err, "range .* was not found") {
			return util.Errorf("expected range removal")
		}
		return nil
	})
}
Developer: bogdanbatog, Project: cockroach, Lines: 54, Source: client_replica_gc_test.go
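Example 4 leans heavily on util.SucceedsSoon, a polling helper that several of these tests use: it retries the supplied closure until the closure returns nil or a timeout expires, failing the test otherwise. Below is a hedged sketch of the typical call shape, reusing names from the example above; the retry interval and deadline live inside the util package and are not shown here.

// Typical SucceedsSoon shape: poll until the condition holds or time out.
util.SucceedsSoon(t, func() error {
	if _, err := mtc.stores[1].GetReplica(rangeID); err == nil {
		return errors.New("replica still present; waiting for GC")
	}
	return nil
})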
Example 5: TestStoreRangeMergeLastRange
// TestStoreRangeMergeLastRange verifies that merging the last range
// fails.
func TestStoreRangeMergeLastRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	// Merge last range.
	args := adminMergeArgs(roachpb.KeyMin)
	if _, pErr := client.SendWrapped(rg1(store), nil, &args); !testutils.IsPError(pErr, "cannot merge final range") {
		t.Fatalf("expected 'cannot merge final range' error; got %s", pErr)
	}
}
Developer: yangxuanjia, Project: cockroach, Lines: 15, Source: client_merge_test.go
Example 6: TestStoreRangeSplitInsideRow
// TestStoreRangeSplitInsideRow verifies an attempt to split a range inside of
// a table row will cause a split at a boundary between rows.
func TestStoreRangeSplitInsideRow(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	// Manually create some of the column keys corresponding to the table:
	//
	//   CREATE TABLE t (id STRING PRIMARY KEY, col1 INT, col2 INT)
	tableKey := keys.MakeTablePrefix(keys.MaxReservedDescID + 1)
	rowKey := roachpb.Key(encoding.EncodeVarintAscending(append([]byte(nil), tableKey...), 1))
	rowKey = encoding.EncodeStringAscending(encoding.EncodeVarintAscending(rowKey, 1), "a")
	col1Key := keys.MakeFamilyKey(append([]byte(nil), rowKey...), 1)
	col2Key := keys.MakeFamilyKey(append([]byte(nil), rowKey...), 2)
	// We don't care about the value, so just store any old thing.
	if err := store.DB().Put(col1Key, "column 1"); err != nil {
		t.Fatal(err)
	}
	if err := store.DB().Put(col2Key, "column 2"); err != nil {
		t.Fatal(err)
	}
	// Split between col1Key and col2Key by splitting before col2Key.
	args := adminSplitArgs(col2Key, col2Key)
	_, err := client.SendWrapped(rg1(store), nil, &args)
	if err != nil {
		t.Fatalf("%s: split unexpected error: %s", col1Key, err)
	}
	rng1 := store.LookupReplica(col1Key, nil)
	rng2 := store.LookupReplica(col2Key, nil)
	// Verify the two columns are still on the same range.
	if !reflect.DeepEqual(rng1, rng2) {
		t.Fatalf("%s: ranges differ: %+v vs %+v", roachpb.Key(col1Key), rng1, rng2)
	}
	// Verify we split on a row key.
	if startKey := rng1.Desc().StartKey; !startKey.Equal(rowKey) {
		t.Fatalf("%s: expected split on %s, but found %s",
			roachpb.Key(col1Key), roachpb.Key(rowKey), startKey)
	}
	// Verify the previous range was split on a row key.
	rng3 := store.LookupReplica(tableKey, nil)
	if endKey := rng3.Desc().EndKey; !endKey.Equal(rowKey) {
		t.Fatalf("%s: expected split on %s, but found %s",
			roachpb.Key(col1Key), roachpb.Key(rowKey), endKey)
	}
}
Developer: YuleiXiao, Project: cockroach, Lines: 52, Source: client_split_test.go
Example 7: TestStoreRangeSplitAtTablePrefix
// TestStoreRangeSplitAtTablePrefix verifies a range can be split at
// UserTableDataMin and still gossip the SystemConfig properly.
func TestStoreRangeSplitAtTablePrefix(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	key := keys.MakeRowSentinelKey(append([]byte(nil), keys.UserTableDataMin...))
	args := adminSplitArgs(key, key)
	if _, pErr := client.SendWrapped(rg1(store), nil, &args); pErr != nil {
		t.Fatalf("%q: split unexpected error: %s", key, pErr)
	}
	var desc sqlbase.TableDescriptor
	descBytes, err := protoutil.Marshal(&desc)
	if err != nil {
		t.Fatal(err)
	}
	// Update SystemConfig to trigger gossip.
	if err := store.DB().Txn(func(txn *client.Txn) error {
		txn.SetSystemConfigTrigger()
		// We don't care about the values, just the keys.
		k := sqlbase.MakeDescMetadataKey(sqlbase.ID(keys.MaxReservedDescID + 1))
		return txn.Put(k, &desc)
	}); err != nil {
		t.Fatal(err)
	}
	successChan := make(chan struct{}, 1)
	store.Gossip().RegisterCallback(gossip.KeySystemConfig, func(_ string, content roachpb.Value) {
		contentBytes, err := content.GetBytes()
		if err != nil {
			t.Fatal(err)
		}
		if bytes.Contains(contentBytes, descBytes) {
			select {
			case successChan <- struct{}{}:
			default:
			}
		}
	})
	select {
	case <-time.After(time.Second):
		t.Errorf("expected a schema gossip containing %q, but did not see one", descBytes)
	case <-successChan:
	}
}
Developer: YuleiXiao, Project: cockroach, Lines: 51, Source: client_split_test.go
Example 8: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Context, initSender InitSenderFn) {
	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	tracer := tracing.NewTracer()
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(baseCtx, ltc.Clock, ltc.Stopper)
	server := rpc.NewServer(rpcContext) // never started
	ltc.Gossip = gossip.New(
		context.Background(), rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)
	ltc.Stores = storage.NewStores(ltc.Clock)
	ltc.Sender = initSender(nodeDesc, tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
		ltc.Gossip)
	if ltc.DBContext == nil {
		dbCtx := client.DefaultDBContext()
		ltc.DBContext = &dbCtx
	}
	ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
	transport := storage.NewDummyRaftTransport()
	ctx := storage.TestStoreContext()
	if ltc.RangeRetryOptions != nil {
		ctx.RangeRetryOptions = *ltc.RangeRetryOptions
	}
	ctx.Ctx = tracing.WithTracer(context.Background(), tracer)
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Gossip.SetNodeID(nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
Developer: yaojingguo, Project: cockroach, Lines: 54, Source: local_test_cluster.go
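Example 8 above and Example 10 further down are two revisions of LocalTestCluster.Start from different commits, which is why the gossip.New and DB-constructor signatures differ between them. The following is a hedged sketch of how a caller might drive this API; the InitSenderFn parameter types mirror the initSender call inside Start, but they are assumptions, as are the Insecure field on base.Context, the Stop method, and passing the Stores object straight through as the sender.

// Sketch of starting a LocalTestCluster, assuming *storage.Stores satisfies
// client.Sender and that InitSenderFn's parameters match the call above.
ltc := &LocalTestCluster{}
initSender := func(nodeDesc *roachpb.NodeDescriptor, tracer opentracing.Tracer,
	clock *hlc.Clock, latency time.Duration, stores *storage.Stores,
	stopper *stop.Stopper, g *gossip.Gossip) client.Sender {
	return stores // simplest sender: route requests straight to the local stores
}
ltc.Start(t, &base.Context{Insecure: true}, initSender) // insecure test context; field name is an assumption
defer ltc.Stop()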
Example 9: TestStoreRangeSplitAtRangeBounds
// TestStoreRangeSplitAtRangeBounds verifies a range cannot be split
// at its start or end keys (that would create a zero-length range!). This
// sort of thing might happen in the wild if two split requests
// arrived for the same key. The first one succeeds and the second would try
// to split at the start of the newly split range.
func TestStoreRangeSplitAtRangeBounds(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	args := adminSplitArgs(roachpb.KeyMin, []byte("a"))
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		t.Fatal(err)
	}
	// This second split will try to split at end of first split range.
	if _, err := client.SendWrapped(rg1(store), nil, &args); err == nil {
		t.Fatalf("split succeeded unexpectedly")
	}
	// Now try to split at start of new range.
	args = adminSplitArgs([]byte("a"), []byte("a"))
	if _, err := client.SendWrapped(rg1(store), nil, &args); err == nil {
		t.Fatalf("split succeeded unexpectedly")
	}
}
Developer: YuleiXiao, Project: cockroach, Lines: 26, Source: client_split_test.go
Example 10: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Context, initSender InitSenderFn) {
	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	tracer := tracing.NewTracer()
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(baseCtx, ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, nil, ltc.Stopper)
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)
	ltc.Stores = storage.NewStores(ltc.Clock)
	ltc.Sender = initSender(nodeDesc, tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
		ltc.Gossip)
	ltc.DB = client.NewDB(ltc.Sender)
	transport := storage.NewDummyRaftTransport()
	ctx := storage.TestStoreContext()
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ctx.Tracer = tracer
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Gossip.SetNodeID(nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
Developer: GitGoldie, Project: cockroach, Lines: 45, Source: local_test_cluster.go
Example 11: BenchmarkStoreRangeSplit
func BenchmarkStoreRangeSplit(b *testing.B) {
	defer tracing.Disable()()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(b, sCtx)
	defer stopper.Stop()
	// Perform initial split of ranges.
	sArgs := adminSplitArgs(roachpb.KeyMin, []byte("b"))
	if _, err := client.SendWrapped(rg1(store), nil, &sArgs); err != nil {
		b.Fatal(err)
	}
	// Write some values left and right of the split key.
	aDesc := store.LookupReplica([]byte("a"), nil).Desc()
	bDesc := store.LookupReplica([]byte("c"), nil).Desc()
	writeRandomDataToRange(b, store, aDesc.RangeID, []byte("aaa"))
	writeRandomDataToRange(b, store, bDesc.RangeID, []byte("ccc"))
	// Merge the b range back into the a range.
	mArgs := adminMergeArgs(roachpb.KeyMin)
	if _, err := client.SendWrapped(rg1(store), nil, &mArgs); err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Split the range.
		b.StartTimer()
		if _, err := client.SendWrapped(rg1(store), nil, &sArgs); err != nil {
			b.Fatal(err)
		}
		// Merge the ranges.
		b.StopTimer()
		if _, err := client.SendWrapped(rg1(store), nil, &mArgs); err != nil {
			b.Fatal(err)
		}
	}
}
Developer: YuleiXiao, Project: cockroach, Lines: 40, Source: client_split_test.go
Example 12: TestStoreRangeSplitConcurrent
// TestStoreRangeSplitConcurrent verifies that concurrent range splits
// of the same range are executed serially, and all but the first fail
// because the split key is invalid after the first split succeeds.
func TestStoreRangeSplitConcurrent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	splitKey := roachpb.Key("a")
	concurrentCount := int32(10)
	wg := sync.WaitGroup{}
	wg.Add(int(concurrentCount))
	failureCount := int32(0)
	for i := int32(0); i < concurrentCount; i++ {
		go func() {
			args := adminSplitArgs(roachpb.KeyMin, splitKey)
			_, pErr := client.SendWrapped(rg1(store), nil, &args)
			if pErr != nil {
				atomic.AddInt32(&failureCount, 1)
			}
			wg.Done()
		}()
	}
	wg.Wait()
	if failureCount != concurrentCount-1 {
		t.Fatalf("concurrent splits succeeded unexpectedly; failureCount=%d", failureCount)
	}
	// Verify everything ended up as expected.
	if a, e := store.ReplicaCount(), 2; a != e {
		t.Fatalf("expected %d replicas after concurrent splits; actual count=%d", e, a)
	}
	rng := store.LookupReplica(roachpb.RKeyMin, nil)
	newRng := store.LookupReplica(roachpb.RKey(splitKey), nil)
	if !bytes.Equal(newRng.Desc().StartKey, splitKey) || !bytes.Equal(splitKey, rng.Desc().EndKey) {
		t.Errorf("ranges mismatched, wanted %q=%q=%q", newRng.Desc().StartKey, splitKey, rng.Desc().EndKey)
	}
	if !bytes.Equal(newRng.Desc().EndKey, roachpb.RKeyMax) || !bytes.Equal(rng.Desc().StartKey, roachpb.RKeyMin) {
		t.Errorf("new ranges do not cover KeyMin-KeyMax, but only %q-%q", rng.Desc().StartKey, newRng.Desc().EndKey)
	}
}
Developer: YuleiXiao, Project: cockroach, Lines: 43, Source: client_split_test.go
Example 13: TestStoreRangeMergeWithData
// TestStoreRangeMergeWithData attempts to merge two colocated ranges,
// each containing data.
func TestStoreRangeMergeWithData(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	content := roachpb.Key("testing!")
	aDesc, bDesc, err := createSplitRanges(store)
	if err != nil {
		t.Fatal(err)
	}
	// Write some values left and right of the proposed split key.
	pArgs := putArgs([]byte("aaa"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	pArgs = putArgs([]byte("ccc"), content)
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: bDesc.RangeID,
	}, &pArgs); err != nil {
		t.Fatal(err)
	}
	// Confirm the values are there.
	gArgs := getArgs([]byte("aaa"))
	if reply, err := client.SendWrapped(rg1(store), nil, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}
	gArgs = getArgs([]byte("ccc"))
	if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: bDesc.RangeID,
	}, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}
	// Merge the b range back into the a range.
	args := adminMergeArgs(roachpb.KeyMin)
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		t.Fatal(err)
	}
	// Verify no intents remain on range descriptor keys.
	for _, key := range []roachpb.Key{keys.RangeDescriptorKey(aDesc.StartKey), keys.RangeDescriptorKey(bDesc.StartKey)} {
		if _, _, err := engine.MVCCGet(context.Background(), store.Engine(), key, store.Clock().Now(), true, nil); err != nil {
			t.Fatal(err)
		}
	}
	// Verify the merge by looking up keys from both ranges.
	rangeA := store.LookupReplica([]byte("a"), nil)
	rangeB := store.LookupReplica([]byte("c"), nil)
	rangeADesc := rangeA.Desc()
	rangeBDesc := rangeB.Desc()
	if !reflect.DeepEqual(rangeA, rangeB) {
		t.Fatalf("ranges were not merged %+v=%+v", rangeADesc, rangeBDesc)
	}
	if !bytes.Equal(rangeADesc.StartKey, roachpb.RKeyMin) {
		t.Fatalf("The start key is not equal to KeyMin %q=%q", rangeADesc.StartKey, roachpb.RKeyMin)
	}
	if !bytes.Equal(rangeADesc.EndKey, roachpb.RKeyMax) {
		t.Fatalf("The end key is not equal to KeyMax %q=%q", rangeADesc.EndKey, roachpb.RKeyMax)
	}
	// Try to get values from after the merge.
	gArgs = getArgs([]byte("aaa"))
	if reply, err := client.SendWrapped(rg1(store), nil, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}
	gArgs = getArgs([]byte("ccc"))
	if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: rangeB.RangeID,
	}, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}
	// Put new values after the merge on both sides.
	pArgs = putArgs([]byte("aaaa"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
//......... (remainder of code omitted) .........
Developer: yangxuanjia, Project: cockroach, Lines: 101, Source: client_merge_test.go
Example 14: TestRangeLookupUseReverse
// TestRangeLookupUseReverse tests whether the results and the result count
// are correct when scanning in reverse order.
func TestRangeLookupUseReverse(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	// Init test ranges:
	// ["","a"), ["a","c"), ["c","e"), ["e","g") and ["g","\xff\xff").
	splits := []roachpb.AdminSplitRequest{
		adminSplitArgs(roachpb.Key("g"), roachpb.Key("g")),
		adminSplitArgs(roachpb.Key("e"), roachpb.Key("e")),
		adminSplitArgs(roachpb.Key("c"), roachpb.Key("c")),
		adminSplitArgs(roachpb.Key("a"), roachpb.Key("a")),
	}
	for _, split := range splits {
		_, pErr := client.SendWrapped(rg1(store), nil, &split)
		if pErr != nil {
			t.Fatalf("%q: split unexpected error: %s", split.SplitKey, pErr)
		}
	}
	// Resolve the intents.
	scanArgs := roachpb.ScanRequest{
		Span: roachpb.Span{
			Key:    keys.RangeMetaKey(roachpb.RKeyMin.Next()),
			EndKey: keys.RangeMetaKey(roachpb.RKeyMax),
		},
	}
	util.SucceedsSoon(t, func() error {
		_, pErr := client.SendWrapped(rg1(store), nil, &scanArgs)
		return pErr.GoError()
	})
	revScanArgs := func(key []byte, maxResults int32) *roachpb.RangeLookupRequest {
		return &roachpb.RangeLookupRequest{
			Span: roachpb.Span{
				Key: key,
			},
			MaxRanges: maxResults,
			Reverse:   true,
		}
	}
	// Test cases.
	testCases := []struct {
		request     *roachpb.RangeLookupRequest
		expected    []roachpb.RangeDescriptor
		expectedPre []roachpb.RangeDescriptor
	}{
		// Test key in the middle of the range.
		{
			request: revScanArgs(keys.RangeMetaKey(roachpb.RKey("f")), 2),
			// ["e","g") and ["c","e").
			expected: []roachpb.RangeDescriptor{
				{StartKey: roachpb.RKey("e"), EndKey: roachpb.RKey("g")},
			},
			expectedPre: []roachpb.RangeDescriptor{
				{StartKey: roachpb.RKey("c"), EndKey: roachpb.RKey("e")},
			},
		},
		// Test key in the end key of the range.
		{
			request: revScanArgs(keys.RangeMetaKey(roachpb.RKey("g")), 3),
			// ["e","g"), ["c","e") and ["a","c").
			expected: []roachpb.RangeDescriptor{
				{StartKey: roachpb.RKey("e"), EndKey: roachpb.RKey("g")},
			},
			expectedPre: []roachpb.RangeDescriptor{
				{StartKey: roachpb.RKey("c"), EndKey: roachpb.RKey("e")},
				{StartKey: roachpb.RKey("a"), EndKey: roachpb.RKey("c")},
			},
		},
		{
			request: revScanArgs(keys.RangeMetaKey(roachpb.RKey("e")), 2),
			// ["c","e") and ["a","c").
			expected: []roachpb.RangeDescriptor{
				{StartKey: roachpb.RKey("c"), EndKey: roachpb.RKey("e")},
			},
			expectedPre: []roachpb.RangeDescriptor{
				{StartKey: roachpb.RKey("a"), EndKey: roachpb.RKey("c")},
			},
		},
		// Test Meta2KeyMax.
		{
			request: revScanArgs(keys.Meta2KeyMax, 2),
			// ["e","g") and ["g","\xff\xff")
			expected: []roachpb.RangeDescriptor{
				{StartKey: roachpb.RKey("g"), EndKey: roachpb.RKey("\xff\xff")},
			},
			expectedPre: []roachpb.RangeDescriptor{
				{StartKey: roachpb.RKey("e"), EndKey: roachpb.RKey("g")},
			},
		},
		// Test Meta1KeyMax.
		{
//......... (remainder of code omitted) .........
Developer: yangxuanjia, Project: cockroach, Lines: 101, Source: client_replica_test.go
Example 15: TestRaftLogQueue
// TestRaftLogQueue verifies that the raft log queue correctly truncates the
// raft log.
func TestRaftLogQueue(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var mtc multiTestContext
	// Turn off raft elections so the raft leader won't change out from under
	// us in this test.
	sc := storage.TestStoreContext()
	sc.RaftTickInterval = time.Hour * 24
	sc.RaftElectionTimeoutTicks = 1000000
	mtc.storeContext = &sc
	mtc.Start(t, 3)
	defer mtc.Stop()
	// Write a single value to ensure we have a leader.
	pArgs := putArgs([]byte("key"), []byte("value"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	// Get the raft leader (and ensure one exists).
	rangeID := mtc.stores[0].LookupReplica([]byte("a"), nil).RangeID
	raftLeaderRepl := mtc.getRaftLeader(rangeID)
	if raftLeaderRepl == nil {
		t.Fatalf("could not find raft leader replica for range %d", rangeID)
	}
	originalIndex, err := raftLeaderRepl.GetFirstIndex()
	if err != nil {
		t.Fatal(err)
	}
	// Write a collection of values to increase the raft log.
	for i := 0; i < storage.RaftLogQueueStaleThreshold+1; i++ {
		pArgs = putArgs([]byte(fmt.Sprintf("key-%d", i)), []byte("value"))
		if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &pArgs); err != nil {
			t.Fatal(err)
		}
	}
	// Sadly, occasionally the queue has a race with the force processing, so
	// the SucceedsSoon retry loop below will capture those rare cases.
	var afterTruncationIndex uint64
	util.SucceedsSoon(t, func() error {
		// Force a truncation check.
		for _, store := range mtc.stores {
			store.ForceRaftLogScanAndProcess()
		}
		// Ensure that firstIndex has increased indicating that the log
		// truncation has occurred.
		var err error
		afterTruncationIndex, err = raftLeaderRepl.GetFirstIndex()
		if err != nil {
			t.Fatal(err)
		}
		if afterTruncationIndex <= originalIndex {
			return util.Errorf("raft log has not been truncated yet, afterTruncationIndex:%d originalIndex:%d",
				afterTruncationIndex, originalIndex)
		}
		return nil
	})
	// Force a truncation check again to ensure that attempting to truncate an
	// already truncated log has no effect.
	for _, store := range mtc.stores {
		store.ForceRaftLogScanAndProcess()
	}
	after2ndTruncationIndex, err := raftLeaderRepl.GetFirstIndex()
	if err != nil {
		t.Fatal(err)
	}
	if afterTruncationIndex > after2ndTruncationIndex {
		t.Fatalf("second truncation destroyed state: afterTruncationIndex:%d after2ndTruncationIndex:%d",
			afterTruncationIndex, after2ndTruncationIndex)
	}
}
Developer: GitGoldie, Project: cockroach, Lines: 80, Source: client_raft_log_queue_test.go
Example 16: TestStoreRangeSplitStatsWithMerges
// TestStoreRangeSplitStatsWithMerges starts by splitting the system keys from
// user-space keys and verifying that the user space side of the split (which is
// empty) has all zeros for stats. It then issues a number of Merge requests to the
// user space side, simulating TimeSeries data. Finally, the test splits the user
// space side halfway and verifies the stats on either side of the split are equal
// to a recomputation.
//
// Note that unlike TestStoreRangeSplitStats, we do not check if the two halves of the
// split's stats are equal to the pre-split stats when added, because this will not be
// true of ranges populated with Merge requests. The reason for this is that Merge
// requests' impact on MVCCStats are only estimated. See updateStatsOnMerge.
func TestStoreRangeSplitStatsWithMerges(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, manual := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	// Split the range after the last table data key.
	keyPrefix := keys.MakeTablePrefix(keys.MaxReservedDescID + 1)
	keyPrefix = keys.MakeRowSentinelKey(keyPrefix)
	args := adminSplitArgs(roachpb.KeyMin, keyPrefix)
	if _, pErr := client.SendWrapped(rg1(store), nil, &args); pErr != nil {
		t.Fatal(pErr)
	}
	// Verify empty range has empty stats.
	rng := store.LookupReplica(keyPrefix, nil)
	// NOTE that this value is expected to change over time, depending on what
	// we store in the sys-local keyspace. Update it accordingly for this test.
	empty := enginepb.MVCCStats{LastUpdateNanos: manual.UnixNano()}
	if err := verifyRangeStats(store.Engine(), rng.RangeID, empty); err != nil {
		t.Fatal(err)
	}
	// Write random TimeSeries data.
	midKey := writeRandomTimeSeriesDataToRange(t, store, rng.RangeID, keyPrefix)
	manual.Increment(100)
	// Split the range at approximate halfway point.
	args = adminSplitArgs(keyPrefix, midKey)
	if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: rng.RangeID,
	}, &args); pErr != nil {
		t.Fatal(pErr)
	}
	snap := store.Engine().NewSnapshot()
	defer snap.Close()
	var msLeft, msRight enginepb.MVCCStats
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rng.RangeID, &msLeft); err != nil {
		t.Fatal(err)
	}
	rngRight := store.LookupReplica(midKey, nil)
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rngRight.RangeID, &msRight); err != nil {
		t.Fatal(err)
	}
	// Stats should both have the new timestamp.
	now := manual.UnixNano()
	if lTs := msLeft.LastUpdateNanos; lTs != now {
		t.Errorf("expected left range stats to have new timestamp, want %d, got %d", now, lTs)
	}
	if rTs := msRight.LastUpdateNanos; rTs != now {
		t.Errorf("expected right range stats to have new timestamp, want %d, got %d", now, rTs)
	}
	// Stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, rng.Desc(), msLeft, now); err != nil {
		t.Fatalf("failed to verify left range's stats after split: %v", err)
	}
	if err := verifyRecomputedStats(snap, rngRight.Desc(), msRight, now); err != nil {
		t.Fatalf("failed to verify right range's stats after split: %v", err)
	}
}
Developer: YuleiXiao, Project: cockroach, Lines: 74, Source: client_split_test.go
Example 17: TestStoreRangeSplitStats
// TestStoreRangeSplitStats starts by splitting the system keys from user-space
// keys and verifying that the user space side of the split (which is empty)
// has all zeros for stats. It then writes random data to the user space side,
// splits it halfway and verifies the two splits have stats exactly equaling
// the pre-split.
func TestStoreRangeSplitStats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, manual := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	// Split the range after the last table data key.
	keyPrefix := keys.MakeTablePrefix(keys.MaxReservedDescID + 1)
	keyPrefix = keys.MakeRowSentinelKey(keyPrefix)
	args := adminSplitArgs(roachpb.KeyMin, keyPrefix)
	if _, pErr := client.SendWrapped(rg1(store), nil, &args); pErr != nil {
		t.Fatal(pErr)
	}
	// Verify empty range has empty stats.
	rng := store.LookupReplica(keyPrefix, nil)
	// NOTE that this value is expected to change over time, depending on what
	// we store in the sys-local keyspace. Update it accordingly for this test.
	empty := enginepb.MVCCStats{LastUpdateNanos: manual.UnixNano()}
	if err := verifyRangeStats(store.Engine(), rng.RangeID, empty); err != nil {
		t.Fatal(err)
	}
	// Write random data.
	midKey := writeRandomDataToRange(t, store, rng.RangeID, keyPrefix)
	// Get the range stats now that we have data.
	snap := store.Engine().NewSnapshot()
	defer snap.Close()
	var ms enginepb.MVCCStats
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rng.RangeID, &ms); err != nil {
		t.Fatal(err)
	}
	if err := verifyRecomputedStats(snap, rng.Desc(), ms, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range's stats before split: %v", err)
	}
	if inMemMS := rng.GetMVCCStats(); inMemMS != ms {
		t.Fatalf("in-memory and on-disk diverged:\n%+v\n!=\n%+v", inMemMS, ms)
	}
	manual.Increment(100)
	// Split the range at approximate halfway point.
	args = adminSplitArgs(keyPrefix, midKey)
	if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: rng.RangeID,
	}, &args); pErr != nil {
		t.Fatal(pErr)
	}
	snap = store.Engine().NewSnapshot()
	defer snap.Close()
	var msLeft, msRight enginepb.MVCCStats
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rng.RangeID, &msLeft); err != nil {
		t.Fatal(err)
	}
	rngRight := store.LookupReplica(midKey, nil)
	if err := engine.MVCCGetRangeStats(context.Background(), snap, rngRight.RangeID, &msRight); err != nil {
		t.Fatal(err)
	}
	// The stats should be exactly equal when added.
	expMS := enginepb.MVCCStats{
		LiveBytes:   msLeft.LiveBytes + msRight.LiveBytes,
		KeyBytes:    msLeft.KeyBytes + msRight.KeyBytes,
		ValBytes:    msLeft.ValBytes + msRight.ValBytes,
		IntentBytes: msLeft.IntentBytes + msRight.IntentBytes,
		LiveCount:   msLeft.LiveCount + msRight.LiveCount,
		KeyCount:    msLeft.KeyCount + msRight.KeyCount,
		ValCount:    msLeft.ValCount + msRight.ValCount,
		IntentCount: msLeft.IntentCount + msRight.IntentCount,
	}
	ms.SysBytes, ms.SysCount = 0, 0
	ms.LastUpdateNanos = 0
	if expMS != ms {
		t.Errorf("expected left plus right ranges to equal original, but\n %+v\n+\n %+v\n!=\n %+v", msLeft, msRight, ms)
	}
	// Stats should both have the new timestamp.
	now := manual.UnixNano()
	if lTs := msLeft.LastUpdateNanos; lTs != now {
		t.Errorf("expected left range stats to have new timestamp, want %d, got %d", now, lTs)
	}
	if rTs := msRight.LastUpdateNanos; rTs != now {
		t.Errorf("expected right range stats to have new timestamp, want %d, got %d", now, rTs)
	}
	// Stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, rng.Desc(), msLeft, now); err != nil {
		t.Fatalf("failed to verify left range's stats after split: %v", err)
	}
	if err := verifyRecomputedStats(snap, rngRight.Desc(), msRight, now); err != nil {
		t.Fatalf("failed to verify right range's stats after split: %v", err)
	}
}
Developer: YuleiXiao, Project: cockroach, Lines: 100, Source: client_split_test.go
Example 18: TestStoreRangeSplitIdempotency
// TestStoreRangeSplitIdempotency executes a split of a range and verifies that the
// resulting ranges respond to the right key ranges and that their stats
// have been properly accounted for and requests can't be replayed.
func TestStoreRangeSplitIdempotency(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sCtx := storage.TestStoreContext()
	sCtx.TestingKnobs.DisableSplitQueue = true
	store, stopper, _ := createTestStoreWithContext(t, sCtx)
	defer stopper.Stop()
	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	content := roachpb.Key("asdvb")
	// First, write some values left and right of the proposed split key.
	pArgs := putArgs([]byte("c"), content)
	if _, pErr := client.SendWrapped(rg1(store), nil, &pArgs); pErr != nil {
		t.Fatal(pErr)
	}
	pArgs = putArgs([]byte("x"), content)
	if _, pErr := client.SendWrapped(rg1(store), nil, &pArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Increments are a good way of testing idempotency. Up here, we
	// address them to the original range, then later to the one that
	// contains the key.
	txn := roachpb.NewTransaction("test", []byte("c"), 10, enginepb.SERIALIZABLE,
		store.Clock().Now(), 0)
	lIncArgs := incrementArgs([]byte("apoptosis"), 100)
	lTxn := *txn
	lTxn.Sequence++
	if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Txn: &lTxn,
	}, &lIncArgs); pErr != nil {
		t.Fatal(pErr)
	}
	rIncArgs := incrementArgs([]byte("wobble"), 10)
	rTxn := *txn
	rTxn.Sequence++
	if _, pErr := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Txn: &rTxn,
	}, &rIncArgs); pErr != nil {
		t.Fatal(pErr)
	}
	// Get the original stats for key and value bytes.
	var ms enginepb.MVCCStats
	if err := engine.MVCCGetRangeStats(context.Background(), store.Engine(), rangeID, &ms); err != nil {
		t.Fatal(err)
	}
	keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
	// Split the range.
	args := adminSplitArgs(roachpb.KeyMin, splitKey)
	if _, pErr := client.SendWrapped(rg1(store), nil, &args); pErr != nil {
		t.Fatal(pErr)
	}
	// Verify no intents remain on range descriptor keys.
	splitKeyAddr, err := keys.Addr(splitKey)
	if err != nil {
		t.Fatal(err)
	}
	for _, key := range []roachpb.Key{keys.RangeDescriptorKey(roachpb.RKeyMin), keys.RangeDescriptorKey(splitKeyAddr)} {
		if _, _, err := engine.MVCCGet(context.Background(), store.Engine(), key, store.Clock().Now(), true, nil); err != nil {
			t.Fatal(err)
		}
//......... (remainder of code omitted) .........