This article collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/pkg/storage.TestStoreConfig. If you are wondering what the Golang TestStoreConfig function does, how to call it, or what real-world usage looks like, the curated code examples below should help.
The following shows 19 code examples of the TestStoreConfig function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
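Before the individual examples, here is a minimal sketch (not taken from the CockroachDB repository) of the call pattern most examples below share: build a config with storage.TestStoreConfig, passing nil for a default clock or an hlc.Clock when the test needs to control time, then adjust TestingKnobs before creating a store. The helpers named in the closing comment (createTestStoreWithConfig, multiTestContext) are defined in the quoted test files rather than in the storage package, and their signatures differ between the snapshots shown; in the oldest snapshot (Example 5) TestStoreConfig is even called with no argument.

package storage_test

import (
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/pkg/storage"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
)

// TestStoreConfigUsageSketch is an illustrative test showing the two call
// variants used throughout the examples below.
func TestStoreConfigUsageSketch(t *testing.T) {
	// Variant 1: pass nil to get a config with a default clock, then flip
	// individual testing knobs, e.g. to keep the split queue out of the way.
	cfgDefault := storage.TestStoreConfig(nil)
	cfgDefault.TestingKnobs.DisableSplitQueue = true

	// Variant 2: pass an explicit clock when the test needs to manipulate
	// time manually, e.g. to expire leases or to timestamp stats checks.
	manual := hlc.NewManualClock(123)
	cfgManual := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
	cfgManual.TestingKnobs.DisableScanner = true

	// The config is then handed to a test helper such as
	// createTestStoreWithConfig(t, cfgDefault), or embedded in a
	// multiTestContext via &multiTestContext{storeConfig: &cfgDefault},
	// exactly as the examples below do.
}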
Example 1: TestStoreRangeMergeStats
// TestStoreRangeMergeStats starts by splitting a range, then writing random data
// to both sides of the split. It then merges the ranges and verifies the merged
// range has stats consistent with recomputations.
func TestStoreRangeMergeStats(t *testing.T) {
defer leaktest.AfterTest(t)()
manual := hlc.NewManualClock(123)
storeCfg := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
// Split the range.
aDesc, bDesc, pErr := createSplitRanges(store)
if pErr != nil {
t.Fatal(pErr)
}
// Write some values left and right of the proposed split key.
writeRandomDataToRange(t, store, aDesc.RangeID, []byte("aaa"))
writeRandomDataToRange(t, store, bDesc.RangeID, []byte("ccc"))
// Get the range stats for both ranges now that we have data.
snap := store.Engine().NewSnapshot()
defer snap.Close()
msA, err := engine.MVCCGetRangeStats(context.Background(), snap, aDesc.RangeID)
if err != nil {
t.Fatal(err)
}
msB, err := engine.MVCCGetRangeStats(context.Background(), snap, bDesc.RangeID)
if err != nil {
t.Fatal(err)
}
// Stats should agree with recomputation.
if err := verifyRecomputedStats(snap, aDesc, msA, manual.UnixNano()); err != nil {
t.Fatalf("failed to verify range A's stats before split: %v", err)
}
if err := verifyRecomputedStats(snap, bDesc, msB, manual.UnixNano()); err != nil {
t.Fatalf("failed to verify range B's stats before split: %v", err)
}
manual.Increment(100)
// Merge the b range back into the a range.
args := adminMergeArgs(roachpb.KeyMin)
if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
t.Fatal(err)
}
replMerged := store.LookupReplica(aDesc.StartKey, nil)
// Get the range stats for the merged range and verify.
snap = store.Engine().NewSnapshot()
defer snap.Close()
msMerged, err := engine.MVCCGetRangeStats(context.Background(), snap, replMerged.RangeID)
if err != nil {
t.Fatal(err)
}
// Merged stats should agree with recomputation.
if err := verifyRecomputedStats(snap, replMerged.Desc(), msMerged, manual.UnixNano()); err != nil {
t.Errorf("failed to verify range's stats after merge: %v", err)
}
}
Author: veteranlu, Project: cockroach, Lines: 63, Source: client_merge_test.go
Example 2: TestStoreRangeMergeTwoEmptyRanges
// TestStoreRangeMergeTwoEmptyRanges tries to merge two empty ranges together.
func TestStoreRangeMergeTwoEmptyRanges(t *testing.T) {
defer leaktest.AfterTest(t)()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
if _, _, err := createSplitRanges(store); err != nil {
t.Fatal(err)
}
// Merge the b range back into the a range.
args := adminMergeArgs(roachpb.KeyMin)
_, err := client.SendWrapped(context.Background(), rg1(store), &args)
if err != nil {
t.Fatal(err)
}
// Verify the merge by looking up keys from both ranges.
replicaA := store.LookupReplica([]byte("a"), nil)
replicaB := store.LookupReplica([]byte("c"), nil)
if !reflect.DeepEqual(replicaA, replicaB) {
t.Fatalf("ranges were not merged %s!=%s", replicaA, replicaB)
}
}
Author: veteranlu, Project: cockroach, Lines: 27, Source: client_merge_test.go
Example 3: TestConsistencyQueueRequiresLive
// TestConsistencyQueueRequiresLive verifies the queue will not
// process ranges whose replicas are not all live.
func TestConsistencyQueueRequiresLive(t *testing.T) {
defer leaktest.AfterTest(t)()
sc := storage.TestStoreConfig(nil)
mtc := &multiTestContext{storeConfig: &sc}
defer mtc.Stop()
mtc.Start(t, 3)
// Replicate the range to three nodes.
repl := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil)
rangeID := repl.RangeID
mtc.replicateRange(rangeID, 1, 2)
// Verify that queueing is immediately possible.
if shouldQ, priority := mtc.stores[0].ConsistencyQueueShouldQueue(
context.TODO(), mtc.clock.Now(), repl, config.SystemConfig{}); !shouldQ {
t.Fatalf("expected shouldQ true; got %t, %f", shouldQ, priority)
}
// Stop a node and expire leases.
mtc.stopStore(2)
mtc.expireLeases(context.TODO())
if shouldQ, priority := mtc.stores[0].ConsistencyQueueShouldQueue(
context.TODO(), mtc.clock.Now(), repl, config.SystemConfig{}); shouldQ {
t.Fatalf("expected shouldQ false; got %t, %f", shouldQ, priority)
}
}
Author: EvilMcJerkface, Project: cockroach, Lines: 29, Source: consistency_queue_test.go
Example 4: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(
addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T,
) (*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
cfg := storage.TestStoreConfig(nil)
stopper := stop.NewStopper()
nodeRPCContext := rpc.NewContext(log.AmbientContext{}, nodeTestBaseContext, cfg.Clock, stopper)
cfg.ScanInterval = 10 * time.Hour
cfg.ConsistencyCheckInterval = 10 * time.Hour
grpcServer := rpc.NewServer(nodeRPCContext)
serverCfg := makeTestConfig()
cfg.Gossip = gossip.NewTest(
0,
nodeRPCContext,
grpcServer,
serverCfg.GossipBootstrapResolvers,
stopper,
metric.NewRegistry(),
)
ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
if err != nil {
t.Fatal(err)
}
if gossipBS != nil {
// Handle possibility of a :0 port specification.
if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
gossipBS = ln.Addr()
}
r, err := resolver.NewResolverFromAddress(gossipBS)
if err != nil {
t.Fatalf("bad gossip address %s: %s", gossipBS, err)
}
cfg.Gossip.SetResolvers([]resolver.Resolver{r})
cfg.Gossip.Start(ln.Addr())
}
retryOpts := base.DefaultRetryOptions()
retryOpts.Closer = stopper.ShouldQuiesce()
distSender := kv.NewDistSender(kv.DistSenderConfig{
Clock: cfg.Clock,
RPCContext: nodeRPCContext,
RPCRetryOptions: &retryOpts,
}, cfg.Gossip)
cfg.AmbientCtx.Tracer = tracing.NewTracer()
sender := kv.NewTxnCoordSender(
cfg.AmbientCtx,
distSender,
cfg.Clock,
false,
stopper,
kv.MakeTxnMetrics(metric.TestSampleInterval),
)
cfg.DB = client.NewDB(sender)
cfg.Transport = storage.NewDummyRaftTransport()
cfg.MetricsSampleInterval = metric.TestSampleInterval
node := NewNode(cfg, status.NewMetricsRecorder(cfg.Clock), metric.NewRegistry(), stopper,
kv.MakeTxnMetrics(metric.TestSampleInterval), sql.MakeEventLogger(nil))
roachpb.RegisterInternalServer(grpcServer, node)
return grpcServer, ln.Addr(), cfg.Clock, node, stopper
}
Author: hvaara, Project: cockroach, Lines: 63, Source: node_test.go
Example 5: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Config, initSender InitSenderFn) {
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
nc := &base.NodeIDContainer{}
ambient.AddLogTag("n", nc)
nodeID := roachpb.NodeID(1)
nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
ltc.tester = t
ltc.Manual = hlc.NewManualClock(0)
ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
ltc.Stopper = stop.NewStopper()
rpcContext := rpc.NewContext(ambient, baseCtx, ltc.Clock, ltc.Stopper)
server := rpc.NewServer(rpcContext) // never started
ltc.Gossip = gossip.New(ambient, nc, rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20)
ltc.Stopper.AddCloser(ltc.Eng)
ltc.Stores = storage.NewStores(ambient, ltc.Clock)
ltc.Sender = initSender(nodeDesc, ambient.Tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
ltc.Gossip)
if ltc.DBContext == nil {
dbCtx := client.DefaultDBContext()
ltc.DBContext = &dbCtx
}
ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
transport := storage.NewDummyRaftTransport()
cfg := storage.TestStoreConfig()
if ltc.RangeRetryOptions != nil {
cfg.RangeRetryOptions = *ltc.RangeRetryOptions
}
cfg.AmbientCtx = ambient
cfg.Clock = ltc.Clock
cfg.DB = ltc.DB
cfg.Gossip = ltc.Gossip
cfg.Transport = transport
cfg.MetricsSampleInterval = metric.TestSampleInterval
ltc.Store = storage.NewStore(cfg, ltc.Eng, nodeDesc)
if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
ltc.Stores.AddStore(ltc.Store)
if err := ltc.Store.BootstrapRange(nil); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
nc.Set(context.TODO(), nodeDesc.NodeID)
if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
t.Fatalf("unable to set node descriptor: %s", err)
}
}
Author: knz, Project: cockroach, Lines: 59, Source: local_test_cluster.go
Example 6: TestLeaseNotUsedAfterRestart
// Test that leases held before a restart are not used after the restart.
// See replica.mu.minLeaseProposedTS for the reasons why this isn't allowed.
func TestLeaseNotUsedAfterRestart(t *testing.T) {
defer leaktest.AfterTest(t)()
sc := storage.TestStoreConfig(nil)
var leaseAcquisitionTrap atomic.Value
// Disable the split queue so that no ranges are split. This makes it easy
// below to trap any lease request and infer that it refers to the range we're
// interested in.
sc.TestingKnobs.DisableSplitQueue = true
sc.TestingKnobs.LeaseRequestEvent = func(ts hlc.Timestamp) {
val := leaseAcquisitionTrap.Load()
if val == nil {
return
}
trapCallback := val.(func(ts hlc.Timestamp))
if trapCallback != nil {
trapCallback(ts)
}
}
mtc := &multiTestContext{storeConfig: &sc}
mtc.Start(t, 1)
defer mtc.Stop()
// Send a read, to acquire a lease.
getArgs := getArgs([]byte("a"))
if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), &getArgs); err != nil {
t.Fatal(err)
}
// Restart the mtc. Before we do that, we're installing a callback used to
// assert that a new lease has been requested. The callback is installed
// before the restart, as the lease might be requested at any time and for
// many reasons by background processes, even before we send the read below.
leaseAcquisitionCh := make(chan error)
var once sync.Once
leaseAcquisitionTrap.Store(func(_ hlc.Timestamp) {
once.Do(func() {
close(leaseAcquisitionCh)
})
})
mtc.restart()
// Send another read and check that the pre-existing lease has not been used.
// Concretely, we check that a new lease is requested.
if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), &getArgs); err != nil {
t.Fatal(err)
}
// Check that the Send above triggered a lease acquisition.
select {
case <-leaseAcquisitionCh:
case <-time.After(time.Second):
t.Fatalf("read did not acquire a new lease")
}
}
Author: bdarnell, Project: cockroach, Lines: 55, Source: client_replica_test.go
Example 7: TestReplicaGCQueueDropReplicaDirect
// TestReplicaGCQueueDropReplicaDirect verifies that a removed replica is
// immediately cleaned up.
func TestReplicaGCQueueDropReplicaDirect(t *testing.T) {
defer leaktest.AfterTest(t)()
mtc := &multiTestContext{}
const numStores = 3
rangeID := roachpb.RangeID(1)
// In this test, the Replica on the second Node is removed, and the test
// verifies that that Node adds this Replica to its RangeGCQueue. However,
// the queue does a consistent lookup which will usually be read from
// Node 1. Hence, if Node 1 hasn't processed the removal when Node 2 has,
// no GC will take place since the consistent RangeLookup hits the first
// Node. We use the TestingCommandFilter to make sure that the second Node
// waits for the first.
cfg := storage.TestStoreConfig(nil)
mtc.storeConfig = &cfg
mtc.storeConfig.TestingKnobs.TestingCommandFilter =
func(filterArgs storagebase.FilterArgs) *roachpb.Error {
et, ok := filterArgs.Req.(*roachpb.EndTransactionRequest)
if !ok || filterArgs.Sid != 2 {
return nil
}
crt := et.InternalCommitTrigger.GetChangeReplicasTrigger()
if crt == nil || crt.ChangeType != roachpb.REMOVE_REPLICA {
return nil
}
testutils.SucceedsSoon(t, func() error {
r, err := mtc.stores[0].GetReplica(rangeID)
if err != nil {
return err
}
if _, ok := r.Desc().GetReplicaDescriptor(2); ok {
return errors.New("expected second node gone from first node's known replicas")
}
return nil
})
return nil
}
defer mtc.Stop()
mtc.Start(t, numStores)
mtc.replicateRange(rangeID, 1, 2)
mtc.unreplicateRange(rangeID, 1)
// Make sure the range is removed from the store.
testutils.SucceedsSoon(t, func() error {
if _, err := mtc.stores[1].GetReplica(rangeID); !testutils.IsError(err, "range .* was not found") {
return errors.Errorf("expected range removal: %v", err) // NB: errors.Wrapf(nil, ...) returns nil.
}
return nil
})
}
Author: BramGruneir, Project: cockroach, Lines: 54, Source: client_replica_gc_test.go
Example 8: TestStoreRangeMergeLastRange
// TestStoreRangeMergeLastRange verifies that merging the last range
// fails.
func TestStoreRangeMergeLastRange(t *testing.T) {
defer leaktest.AfterTest(t)()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
// Merge last range.
args := adminMergeArgs(roachpb.KeyMin)
if _, pErr := client.SendWrapped(context.Background(), rg1(store), &args); !testutils.IsPError(pErr, "cannot merge final range") {
t.Fatalf("expected 'cannot merge final range' error; got %s", pErr)
}
}
Author: veteranlu, Project: cockroach, Lines: 15, Source: client_merge_test.go
Example 9: BenchmarkStoreRangeMerge
func BenchmarkStoreRangeMerge(b *testing.B) {
defer tracing.Disable()()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
stopper := stop.NewStopper()
defer stopper.Stop()
store := createTestStoreWithConfig(b, stopper, storeCfg)
// Perform initial split of ranges.
sArgs := adminSplitArgs(roachpb.KeyMin, []byte("b"))
if _, err := client.SendWrapped(context.Background(), rg1(store), sArgs); err != nil {
b.Fatal(err)
}
// Write some values left and right of the proposed split key.
aDesc := store.LookupReplica([]byte("a"), nil).Desc()
bDesc := store.LookupReplica([]byte("c"), nil).Desc()
writeRandomDataToRange(b, store, aDesc.RangeID, []byte("aaa"))
writeRandomDataToRange(b, store, bDesc.RangeID, []byte("ccc"))
// Create args to merge the b range back into the a range.
mArgs := adminMergeArgs(roachpb.KeyMin)
b.ResetTimer()
for i := 0; i < b.N; i++ {
// Merge the ranges.
b.StartTimer()
if _, err := client.SendWrapped(context.Background(), rg1(store), mArgs); err != nil {
b.Fatal(err)
}
// Split the range.
b.StopTimer()
if _, err := client.SendWrapped(context.Background(), rg1(store), sArgs); err != nil {
b.Fatal(err)
}
}
}
Author: BramGruneir, Project: cockroach, Lines: 38, Source: client_merge_test.go
Example 10: TestStoreRangeMergeMetadataCleanup
// TestStoreRangeMergeMetadataCleanup tests that all metadata of a
// subsumed range is cleaned up on merge.
func TestStoreRangeMergeMetadataCleanup(t *testing.T) {
defer leaktest.AfterTest(t)()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
scan := func(f func(roachpb.KeyValue) (bool, error)) {
if _, err := engine.MVCCIterate(context.Background(), store.Engine(), roachpb.KeyMin, roachpb.KeyMax, hlc.ZeroTimestamp, true, nil, false, f); err != nil {
t.Fatal(err)
}
}
content := roachpb.Key("testing!")
// Write some values left of the proposed split key.
pArgs := putArgs([]byte("aaa"), content)
if _, err := client.SendWrapped(context.Background(), rg1(store), &pArgs); err != nil {
t.Fatal(err)
}
// Collect all the keys.
preKeys := make(map[string]struct{})
scan(func(kv roachpb.KeyValue) (bool, error) {
preKeys[string(kv.Key)] = struct{}{}
return false, nil
})
// Split the range.
_, bDesc, err := createSplitRanges(store)
if err != nil {
t.Fatal(err)
}
// Write some values right of the split key.
pArgs = putArgs([]byte("ccc"), content)
if _, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: bDesc.RangeID,
}, &pArgs); err != nil {
t.Fatal(err)
}
// Merge the b range back into the a range.
args := adminMergeArgs(roachpb.KeyMin)
if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
t.Fatal(err)
}
// Collect all the keys again.
postKeys := make(map[string]struct{})
scan(func(kv roachpb.KeyValue) (bool, error) {
postKeys[string(kv.Key)] = struct{}{}
return false, nil
})
// Compute the new keys.
for k := range preKeys {
delete(postKeys, k)
}
// Keep only the subsumed range's local keys.
localRangeKeyPrefix := string(keys.MakeRangeIDPrefix(bDesc.RangeID))
for k := range postKeys {
if !strings.HasPrefix(k, localRangeKeyPrefix) {
delete(postKeys, k)
}
}
if numKeys := len(postKeys); numKeys > 0 {
var buf bytes.Buffer
fmt.Fprintf(&buf, "%d keys were not cleaned up:\n", numKeys)
for k := range postKeys {
fmt.Fprintf(&buf, "%q\n", k)
}
t.Fatal(buf.String())
}
}
Author: veteranlu, Project: cockroach, Lines: 78, Source: client_merge_test.go
Example 11: TestRangeTransferLease
func TestRangeTransferLease(t *testing.T) {
defer leaktest.AfterTest(t)()
cfg := storage.TestStoreConfig(nil)
var filterMu syncutil.Mutex
var filter func(filterArgs storagebase.FilterArgs) *roachpb.Error
cfg.TestingKnobs.TestingCommandFilter =
func(filterArgs storagebase.FilterArgs) *roachpb.Error {
filterMu.Lock()
filterCopy := filter
filterMu.Unlock()
if filterCopy != nil {
return filterCopy(filterArgs)
}
return nil
}
var waitForTransferBlocked atomic.Value
waitForTransferBlocked.Store(false)
transferBlocked := make(chan struct{})
cfg.TestingKnobs.LeaseTransferBlockedOnExtensionEvent = func(
_ roachpb.ReplicaDescriptor) {
if waitForTransferBlocked.Load().(bool) {
transferBlocked <- struct{}{}
waitForTransferBlocked.Store(false)
}
}
mtc := &multiTestContext{}
mtc.storeConfig = &cfg
mtc.Start(t, 2)
defer mtc.Stop()
// First, do a write; we'll use it to determine when the dust has settled.
leftKey := roachpb.Key("a")
incArgs := incrementArgs(leftKey, 1)
if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], &incArgs); pErr != nil {
t.Fatal(pErr)
}
// Get the left range's ID.
rangeID := mtc.stores[0].LookupReplica(roachpb.RKey("a"), nil).RangeID
// Replicate the left range onto node 1.
mtc.replicateRange(rangeID, 1)
replica0 := mtc.stores[0].LookupReplica(roachpb.RKey("a"), nil)
replica1 := mtc.stores[1].LookupReplica(roachpb.RKey("a"), nil)
gArgs := getArgs(leftKey)
replica0Desc, err := replica0.GetReplicaDescriptor()
if err != nil {
t.Fatal(err)
}
// Check that replica0 can serve reads OK.
if _, pErr := client.SendWrappedWith(
context.Background(),
mtc.senders[0],
roachpb.Header{Replica: replica0Desc},
&gArgs,
); pErr != nil {
t.Fatal(pErr)
}
{
// Transferring the lease to ourself should be a no-op.
origLeasePtr, _ := replica0.GetLease()
origLease := *origLeasePtr
if err := replica0.AdminTransferLease(replica0Desc.StoreID); err != nil {
t.Fatal(err)
}
newLeasePtr, _ := replica0.GetLease()
if origLeasePtr != newLeasePtr || origLease != *newLeasePtr {
t.Fatalf("expected %+v, but found %+v", origLeasePtr, newLeasePtr)
}
}
{
// An invalid target should result in an error.
const expected = "unable to find store .* in range"
if err := replica0.AdminTransferLease(1000); !testutils.IsError(err, expected) {
t.Fatalf("expected %s, but found %v", expected, err)
}
}
// Move the lease to store 1.
var newHolderDesc roachpb.ReplicaDescriptor
util.SucceedsSoon(t, func() error {
var err error
newHolderDesc, err = replica1.GetReplicaDescriptor()
return err
})
if err := replica0.AdminTransferLease(newHolderDesc.StoreID); err != nil {
t.Fatal(err)
}
// Check that replica0 doesn't serve reads any more.
replica0Desc, err = replica0.GetReplicaDescriptor()
if err != nil {
t.Fatal(err)
}
_, pErr := client.SendWrappedWith(
context.Background(),
//......... (remainder of the code omitted here) .........
Author: bdarnell, Project: cockroach, Lines: 101, Source: client_replica_test.go
Example 12: TestStoreRangeLeaseSwitcheroo
// TestStoreRangeLeaseSwitcheroo verifies that ranges can be switched
// between expiration and epoch and back.
func TestStoreRangeLeaseSwitcheroo(t *testing.T) {
defer leaktest.AfterTest(t)()
sc := storage.TestStoreConfig(nil)
sc.EnableEpochRangeLeases = true
mtc := &multiTestContext{storeConfig: &sc}
defer mtc.Stop()
mtc.Start(t, 1)
splitKey := roachpb.Key("a")
splitArgs := adminSplitArgs(splitKey, splitKey)
if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil {
t.Fatal(pErr)
}
// Allow leases to expire and send commands to ensure we
// re-acquire, then check types again.
mtc.expireLeases()
if _, err := mtc.dbs[0].Inc(context.TODO(), splitKey, 1); err != nil {
t.Fatalf("failed to increment: %s", err)
}
// We started with epoch ranges enabled, so verify we have an epoch lease.
repl := mtc.stores[0].LookupReplica(roachpb.RKey(splitKey), nil)
lease, _ := repl.GetLease()
if lt := lease.Type(); lt != roachpb.LeaseEpoch {
t.Fatalf("expected lease type epoch; got %d", lt)
}
// Stop the store and reverse the epoch range lease setting.
mtc.stopStore(0)
sc.EnableEpochRangeLeases = false
mtc.restartStore(0)
mtc.expireLeases()
if _, err := mtc.dbs[0].Inc(context.TODO(), splitKey, 1); err != nil {
t.Fatalf("failed to increment: %s", err)
}
// Verify we end up with an expiration lease on restart.
repl = mtc.stores[0].LookupReplica(roachpb.RKey(splitKey), nil)
lease, _ = repl.GetLease()
if lt := lease.Type(); lt != roachpb.LeaseExpiration {
t.Fatalf("expected lease type expiration; got %d", lt)
}
// Now, one more time, switch back to epoch-based.
mtc.stopStore(0)
sc.EnableEpochRangeLeases = true
mtc.restartStore(0)
mtc.expireLeases()
if _, err := mtc.dbs[0].Inc(context.TODO(), splitKey, 1); err != nil {
t.Fatalf("failed to increment: %s", err)
}
// Verify we end up with an epoch lease on restart.
repl = mtc.stores[0].LookupReplica(roachpb.RKey(splitKey), nil)
lease, _ = repl.GetLease()
if lt := lease.Type(); lt != roachpb.LeaseEpoch {
t.Fatalf("expected lease type epoch; got %d", lt)
}
}
Author: BramGruneir, Project: cockroach, Lines: 64, Source: client_lease_test.go
Example 13: TestStoreRangeLease
// TestStoreRangeLease verifies that ranges after range 0 get
// epoch-based range leases if enabled and expiration-based
// otherwise.
func TestStoreRangeLease(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, enableEpoch := range []bool{true, false} {
t.Run(fmt.Sprintf("epoch-based leases? %t", enableEpoch), func(t *testing.T) {
sc := storage.TestStoreConfig(nil)
sc.EnableEpochRangeLeases = enableEpoch
mtc := &multiTestContext{storeConfig: &sc}
defer mtc.Stop()
mtc.Start(t, 1)
splitKeys := []roachpb.Key{roachpb.Key("a"), roachpb.Key("b"), roachpb.Key("c")}
for _, splitKey := range splitKeys {
splitArgs := adminSplitArgs(splitKey, splitKey)
if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil {
t.Fatal(pErr)
}
}
rLeft := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil)
lease, _ := rLeft.GetLease()
if lt := lease.Type(); lt != roachpb.LeaseExpiration {
t.Fatalf("expected lease type expiration; got %d", lt)
}
// After the split, expect an expiration lease for other ranges.
for _, key := range splitKeys {
repl := mtc.stores[0].LookupReplica(roachpb.RKey(key), nil)
lease, _ = repl.GetLease()
if lt := lease.Type(); lt != roachpb.LeaseExpiration {
t.Fatalf("%s: expected lease type epoch; got %d", key, lt)
}
}
// Allow leases to expire and send commands to ensure we
// re-acquire, then check types again.
mtc.expireLeases()
for _, key := range splitKeys {
if _, err := mtc.dbs[0].Inc(context.TODO(), key, 1); err != nil {
t.Fatalf("%s failed to increment: %s", key, err)
}
}
// After the expiration, expect an epoch lease for the RHS if
// we've enabled epoch based range leases.
for _, key := range splitKeys {
repl := mtc.stores[0].LookupReplica(roachpb.RKey(key), nil)
lease, _ = repl.GetLease()
if enableEpoch {
if lt := lease.Type(); lt != roachpb.LeaseEpoch {
t.Fatalf("expected lease type epoch; got %d", lt)
}
} else {
if lt := lease.Type(); lt != roachpb.LeaseExpiration {
t.Fatalf("expected lease type expiration; got %d", lt)
}
}
}
})
}
}
Author: BramGruneir, Project: cockroach, Lines: 64, Source: client_lease_test.go
Example 14: TestStoreRangeMergeWithData
// TestStoreRangeMergeWithData attempts to merge two collocated ranges,
// each containing data.
func TestStoreRangeMergeWithData(t *testing.T) {
defer leaktest.AfterTest(t)()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
content := roachpb.Key("testing!")
aDesc, bDesc, err := createSplitRanges(store)
if err != nil {
t.Fatal(err)
}
// Write some values left and right of the proposed split key.
pArgs := putArgs([]byte("aaa"), content)
if _, err := client.SendWrapped(context.Background(), rg1(store), &pArgs); err != nil {
t.Fatal(err)
}
pArgs = putArgs([]byte("ccc"), content)
if _, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: bDesc.RangeID,
}, &pArgs); err != nil {
t.Fatal(err)
}
// Confirm the values are there.
gArgs := getArgs([]byte("aaa"))
if reply, err := client.SendWrapped(context.Background(), rg1(store), &gArgs); err != nil {
t.Fatal(err)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
gArgs = getArgs([]byte("ccc"))
if reply, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: bDesc.RangeID,
}, &gArgs); err != nil {
t.Fatal(err)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
// Merge the b range back into the a range.
args := adminMergeArgs(roachpb.KeyMin)
if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
t.Fatal(err)
}
// Verify no intents remain on range descriptor keys.
for _, key := range []roachpb.Key{keys.RangeDescriptorKey(aDesc.StartKey), keys.RangeDescriptorKey(bDesc.StartKey)} {
if _, _, err := engine.MVCCGet(context.Background(), store.Engine(), key, store.Clock().Now(), true, nil); err != nil {
t.Fatal(err)
}
}
// Verify the merge by looking up keys from both ranges.
rangeA := store.LookupReplica([]byte("a"), nil)
rangeB := store.LookupReplica([]byte("c"), nil)
rangeADesc := rangeA.Desc()
rangeBDesc := rangeB.Desc()
if !reflect.DeepEqual(rangeA, rangeB) {
t.Fatalf("ranges were not merged %+v=%+v", rangeADesc, rangeBDesc)
}
if !bytes.Equal(rangeADesc.StartKey, roachpb.RKeyMin) {
t.Fatalf("The start key is not equal to KeyMin %q=%q", rangeADesc.StartKey, roachpb.RKeyMin)
}
if !bytes.Equal(rangeADesc.EndKey, roachpb.RKeyMax) {
t.Fatalf("The end key is not equal to KeyMax %q=%q", rangeADesc.EndKey, roachpb.RKeyMax)
}
// Try to get values from after the merge.
gArgs = getArgs([]byte("aaa"))
if reply, err := client.SendWrapped(context.Background(), rg1(store), &gArgs); err != nil {
t.Fatal(err)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
gArgs = getArgs([]byte("ccc"))
if reply, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: rangeB.RangeID,
}, &gArgs); err != nil {
t.Fatal(err)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
// Put new values after the merge on both sides.
pArgs = putArgs([]byte("aaaa"), content)
if _, err := client.SendWrapped(context.Background(), rg1(store), &pArgs); err != nil {
//......... (remainder of the code omitted here) .........
Author: veteranlu, Project: cockroach, Lines: 101, Source: client_merge_test.go
Example 15: TestTimeSeriesMaintenanceQueue
// TestTimeSeriesMaintenanceQueue verifies that the shouldQueue and process
// methods pass the correct data to the store's TimeSeriesDataStore.
func TestTimeSeriesMaintenanceQueue(t *testing.T) {
defer leaktest.AfterTest(t)()
model := &modelTimeSeriesDataStore{
t: t,
pruneSeenStartKeys: make(map[string]struct{}),
pruneSeenEndKeys: make(map[string]struct{}),
}
manual := hlc.NewManualClock(1)
cfg := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
cfg.TimeSeriesDataStore = model
cfg.TestingKnobs.DisableScanner = true
cfg.TestingKnobs.DisableSplitQueue = true
stopper := stop.NewStopper()
defer stopper.Stop()
store := createTestStoreWithConfig(t, stopper, cfg)
// Generate several splits.
splitKeys := []roachpb.Key{roachpb.Key("c"), roachpb.Key("b"), roachpb.Key("a")}
for _, k := range splitKeys {
repl := store.LookupReplica(roachpb.RKey(k), nil)
args := adminSplitArgs(k, k)
if _, pErr := client.SendWrappedWith(context.Background(), store, roachpb.Header{
RangeID: repl.RangeID,
}, args); pErr != nil {
t.Fatal(pErr)
}
}
// Generate a list of start/end keys the model should have been passed by
// the queue. This consists of all split keys, with KeyMin as an additional
// start and KeyMax as an additional end.
expectedStartKeys := make(map[string]struct{})
expectedEndKeys := make(map[string]struct{})
expectedStartKeys[roachpb.KeyMin.String()] = struct{}{}
expectedEndKeys[roachpb.KeyMax.String()] = struct{}{}
for _, expected := range splitKeys {
expectedStartKeys[expected.String()] = struct{}{}
expectedEndKeys[expected.String()] = struct{}{}
}
// Wait for splits to complete and system config to be available.
util.SucceedsSoon(t, func() error {
if a, e := store.ReplicaCount(), len(expectedEndKeys); a != e {
return fmt.Errorf("expected %d replicas in store; found %d", a, e)
}
if _, ok := store.Gossip().GetSystemConfig(); !ok {
return fmt.Errorf("system config not yet available")
}
return nil
})
// Force replica scan to run, which will populate the model.
now := store.Clock().Now()
store.ForceTimeSeriesMaintenanceQueueProcess()
// Wait for processing to complete.
util.SucceedsSoon(t, func() error {
model.Lock()
defer model.Unlock()
if a, e := model.containsCalled, len(expectedStartKeys); a != e {
return fmt.Errorf("ContainsTimeSeries called %d times; expected %d", a, e)
}
if a, e := model.pruneCalled, len(expectedStartKeys); a != e {
return fmt.Errorf("PruneTimeSeries called %d times; expected %d", a, e)
}
return nil
})
model.Lock()
if a, e := model.pruneSeenStartKeys, expectedStartKeys; !reflect.DeepEqual(a, e) {
t.Errorf("start keys seen by PruneTimeSeries did not match expectation: %s", pretty.Diff(a, e))
}
if a, e := model.pruneSeenEndKeys, expectedEndKeys; !reflect.DeepEqual(a, e) {
t.Errorf("end keys seen by PruneTimeSeries did not match expectation: %s", pretty.Diff(a, e))
}
model.Unlock()
util.SucceedsSoon(t, func() error {
keys := []roachpb.RKey{roachpb.RKeyMin}
for _, k := range splitKeys {
keys = append(keys, roachpb.RKey(k))
}
for _, key := range keys {
repl := store.LookupReplica(key, nil)
ts, err := repl.GetQueueLastProcessed(context.TODO(), "timeSeriesMaintenance")
if err != nil {
return err
}
if ts.Less(now) {
return errors.Errorf("expected last processed %s > %s", ts, now)
}
}
return nil
})
//......... (remainder of the code omitted here) .........
Author: jmptrader, Project: cockroach, Lines: 101, Source: ts_maintenance_queue_test.go
Example 16: TestRaftLogQueue
// TestRaftLogQueue verifies that the raft log queue correctly truncates the
// raft log.
func TestRaftLogQueue(t *testing.T) {
defer leaktest.AfterTest(t)()
mtc := &multiTestContext{}
// Set maxBytes to something small so we can trigger the raft log truncation
// without adding 64MB of logs.
const maxBytes = 1 << 16
defer config.TestingSetDefaultZoneConfig(config.ZoneConfig{
RangeMaxBytes: maxBytes,
})()
// Turn off raft elections so the raft leader won't change out from under
// us in this test.
sc := storage.TestStoreConfig(nil)
sc.RaftTickInterval = math.MaxInt32
sc.RaftElectionTimeoutTicks = 1000000
mtc.storeConfig = &sc
defer mtc.Stop()
mtc.Start(t, 3)
// Write a single value to ensure we have a leader.
pArgs := putArgs([]byte("key"), []byte("value"))
if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), pArgs); err != nil {
t.Fatal(err)
}
// Get the raft leader (and ensure one exists).
rangeID := mtc.stores[0].LookupReplica([]byte("a"), nil).RangeID
raftLeaderRepl := mtc.getRaftLeader(rangeID)
if raftLeaderRepl == nil {
t.Fatalf("could not find raft leader replica for range %d", rangeID)
}
originalIndex, err := raftLeaderRepl.GetFirstIndex()
if err != nil {
t.Fatal(err)
}
// Disable splits since we're increasing the raft log with puts.
for _, store := range mtc.stores {
store.SetSplitQueueActive(false)
}
// Write a collection of values to increase the raft log.
value := bytes.Repeat([]byte("a"), 1000) // 1KB
for size := int64(0); size < 2*maxBytes; size += int64(len(value)) {
pArgs = putArgs([]byte(fmt.Sprintf("key-%d", size)), value)
if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), pArgs); err != nil {
t.Fatal(err)
}
}
// Sadly, the queue occasionally races with the force processing, so
// wrapping this check in SucceedsSoon captures those rare cases.
var afterTruncationIndex uint64
testutils.SucceedsSoon(t, func() error {
// Force a truncation check.
for _, store := range mtc.stores {
store.ForceRaftLogScanAndProcess()
}
// Ensure that firstIndex has increased indicating that the log
// truncation has occurred.
var err error
afterTruncationIndex, err = raftLeaderRepl.GetFirstIndex()
if err != nil {
t.Fatal(err)
}
if afterTruncationIndex <= originalIndex {
return errors.Errorf("raft log has not been truncated yet, afterTruncationIndex:%d originalIndex:%d",
afterTruncationIndex, originalIndex)
}
return nil
})
// Force a truncation check again to ensure that attempting to truncate an
// already truncated log has no effect. This check, unlike in the last
// iteration, cannot use a succeedsSoon. This check is fragile in that the
// truncation triggered here may lose the race against the call to
// GetFirstIndex, giving a false negative. Fixing this requires additional
// instrumentation of the queues, which was deemed to require too much work
// at the time of this writing.
for _, store := range mtc.stores {
store.ForceRaftLogScanAndProcess()
}
after2ndTruncationIndex, err := raftLeaderRepl.GetFirstIndex()
if err != nil {
t.Fatal(err)
}
if afterTruncationIndex > after2ndTruncationIndex {
t.Fatalf("second truncation destroyed state: afterTruncationIndex:%d after2ndTruncationIndex:%d",
afterTruncationIndex, after2ndTruncationIndex)
}
}
Author: BramGruneir, Project: cockroach, Lines: 98, Source: client_raft_log_queue_test.go
Example 17: TestTxnPutOutOfOrder
// TestTxnPutOutOfOrder tests a case where a put operation of an older
// timestamp comes after a put operation of a newer timestamp in a
// txn. The test ensures such an out-of-order put succeeds and
// overrides an old value. The test uses a "Writer" and a "Reader"
// to reproduce an out-of-order put.
//
// 1) The Writer executes a put operation and writes a write intent with
// time T in a txn.
// 2) Before the Writer's txn is committed, the Reader sends a high priority
// get operation with time T+100. This pushes the Writer txn timestamp to
// T+100 and triggers the restart of the Writer's txn. The original
// write intent timestamp is also updated to T+100.
// 3) The Writer starts a new epoch of the txn, but before it writes, the
// Reader sends another high priority get operation with time T+200. This
// pushes the Writer txn timestamp to T+200 to trigger a restart of the
// Writer txn. The Writer will not actually restart until it tries to commit
// the current epoch of the transaction. The Reader updates the timestamp of
// the write intent to T+200. The test deliberately fails the Reader get
// operation, and cockroach doesn't update its read timestamp cache.
// 4) The Writer executes the put operation again. This put operation comes
// out-of-order since its timestamp is T+100, while the intent timestamp
// updated at Step 3 is T+200.
// 5) The put operation overrides the old value using timestamp T+100.
// 6) When the Writer attempts to commit its txn, the txn will be restarted
// again at a new epoch timestamp T+200, which will finally succeed.
func TestTxnPutOutOfOrder(t *testing.T) {
defer leaktest.AfterTest(t)()
const key = "key"
// Set up a filter so that the get operation at Step 3 will return an error.
var numGets int32
stopper := stop.NewStopper()
defer stopper.Stop()
manual := hlc.NewManualClock(123)
cfg := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
cfg.TestingKnobs.TestingCommandFilter =
func(filterArgs storagebase.FilterArgs) *roachpb.Error {
if _, ok := filterArgs.Req.(*roachpb.GetRequest); ok &&
filterArgs.Req.Header().Key.Equal(roachpb.Key(key)) &&
filterArgs.Hdr.Txn == nil {
// The Reader executes two get operations, each of which triggers two get requests
// (the first request fails and triggers txn push, and then the second request
// succeeds). Returns an error for the fourth get request to avoid timestamp cache
// update after the third get operation pushes the txn timestamp.
if atomic.AddInt32(&numGets, 1) == 4 {
return roachpb.NewErrorWithTxn(errors.Errorf("Test"), filterArgs.Hdr.Txn)
}
}
return nil
}
eng := engine.NewInMem(roachpb.Attributes{}, 10<<20)
stopper.AddCloser(eng)
store := createTestStoreWithEngine(t,
eng,
true,
cfg,
stopper,
)
// Put an initial value.
initVal := []byte("initVal")
err := store.DB().Put(context.TODO(), key, initVal)
if err != nil {
t.Fatalf("failed to put: %s", err)
}
waitPut := make(chan struct{})
waitFirstGet := make(chan struct{})
waitTxnRestart := make(chan struct{})
waitSecondGet := make(chan struct{})
waitTxnComplete := make(chan struct{})
// Start the Writer.
go func() {
epoch := -1
// Start a txn that does read-after-write.
// The txn will be restarted twice, and the out-of-order put
// will happen in the second epoch.
if err := store.DB().Txn(context.TODO(), func(txn *client.Txn) error {
epoch++
if epoch == 1 {
// Wait until the second get operation is issued.
close(waitTxnRestart)
<-waitSecondGet
}
updatedVal := []byte("updatedVal")
if err := txn.Put(key, updatedVal); err != nil {
return err
}
// Make sure a get will return the value that was just written.
actual, err := txn.Get(key)
if err != nil {
return err
}
if !bytes.Equal(actual.ValueBytes(), updatedVal) {
t.Fatalf("unexpected get result: %s", actual)
//......... (remainder of the code omitted here) .........
Author: bdarnell, Project: cockroach, Lines: 101, Source: client_replica_test.go
Example 18: TestTimeSeriesMainten