
Golang keys.Addr Function Code Examples


This article collects typical usage examples of the Golang Addr function from github.com/cockroachdb/cockroach/keys. If you have been wondering what keys.Addr does, how to call it, or what it looks like in real code, the curated examples below should help.



A total of 20 code examples of the Addr function are shown below, sorted by popularity by default.
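
Before the examples, here is a minimal, self-contained sketch (not taken from the article's examples) of what keys.Addr computes: the routing address of a key, which is the key itself for ordinary global keys and the associated global key for range-local keys such as range descriptor keys. The sketch assumes the pre-rename import path github.com/cockroachdb/cockroach used throughout these examples and the newer (RKey, error) signature; some older snippets below (Examples 1, 2, 6, 8) use a variant of Addr that returns only the RKey.

// A minimal sketch, not from the article; assumes the (RKey, error) form of keys.Addr.
package main

import (
	"bytes"
	"fmt"

	"github.com/cockroachdb/cockroach/keys"
	"github.com/cockroachdb/cockroach/roachpb"
)

func main() {
	// An ordinary global key addresses to itself.
	k := roachpb.Key("user-key")
	rk, err := keys.Addr(k)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(rk, k)) // true: a global key is its own address

	// A range-local key (here the range descriptor key kept under the
	// range-local prefix) addresses to the global key it is attached to,
	// which is how requests on local keys get routed to the right range.
	local := keys.RangeDescriptorKey(rk)
	lrk, err := keys.Addr(local)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(lrk, rk)) // true: the descriptor key addresses back to "user-key"
}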

Example 1: prev

// prev gives the right boundary of the union of all requests which don't
// affect keys larger than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'roachpb'.
func prev(ba roachpb.BatchRequest, k roachpb.RKey) roachpb.RKey {
	candidate := roachpb.RKeyMin
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr := keys.Addr(h.Key)
		eAddr := keys.Addr(h.EndKey)
		if len(eAddr) == 0 {
			// Can probably avoid having to compute Next() here if
			// we're in the mood for some more complexity.
			eAddr = addr.Next()
		}
		if !eAddr.Less(k) {
			if !k.Less(addr) {
				// Range contains k, so won't be able to go lower.
				return k
			}
			// Range is disjoint from [KeyMin,k).
			continue
		}
		// We want the largest surviving candidate.
		if candidate.Less(addr) {
			candidate = addr
		}
	}
	return candidate
}
Developer: mbertschler, Project: cockroach, Lines: 30, Source: batch.go


Example 2: TestReplicateRange

// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()

	// Issue a command on the first node before replicating.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}

	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}

	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc()); err != nil {
		t.Fatal(err)
	}
	// Verify no intent remains on range descriptor key.
	key := keys.RangeDescriptorKey(rng.Desc().StartKey)
	desc := roachpb.RangeDescriptor{}
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
		t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
	}
	// Verify that in time, no intents remain on meta addressing
	// keys, and that range descriptor on the meta records is correct.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		meta2 := keys.Addr(keys.RangeMetaKey(roachpb.RKeyMax))
		meta1 := keys.Addr(keys.RangeMetaKey(meta2))
		for _, key := range []roachpb.RKey{meta2, meta1} {
			metaDesc := roachpb.RangeDescriptor{}
			if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key.AsRawKey(), mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
				return util.Errorf("failed to resolve %s", key.AsRawKey())
			}
			if !reflect.DeepEqual(metaDesc, desc) {
				return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
			}
		}
		return nil
	})

	// Verify that the same data is available on the replica.
	util.SucceedsWithin(t, replicaReadTimeout, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return util.Errorf("failed to read data: %s", err)
		} else if e, v := int64(5), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return util.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
Developer: harryge00, Project: cockroach, Lines: 61, Source: client_raft_test.go


Example 3: metaKey

func metaKey(key roachpb.RKey) []byte {
	rk, err := keys.Addr(keys.RangeMetaKey(key))
	if err != nil {
		panic(err)
	}
	return rk
}
Developer: GitGoldie, Project: cockroach, Lines: 7, Source: addressing_test.go


Example 4: prev

// prev gives the right boundary of the union of all requests which don't
// affect keys larger than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'roachpb'.
func prev(ba roachpb.BatchRequest, k roachpb.RKey) (roachpb.RKey, error) {
	candidate := roachpb.RKeyMin
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr, err := keys.Addr(h.Key)
		if err != nil {
			return nil, err
		}
		eAddr, err := keys.AddrUpperBound(h.EndKey)
		if err != nil {
			return nil, err
		}
		if len(eAddr) == 0 {
			eAddr = addr.Next()
		}
		if !eAddr.Less(k) {
			if !k.Less(addr) {
				// Range contains k, so won't be able to go lower.
				return k, nil
			}
			// Range is disjoint from [KeyMin,k).
			continue
		}
		// We want the largest surviving candidate.
		if candidate.Less(addr) {
			candidate = addr
		}
	}
	return candidate, nil
}
Developer: CubeLite, Project: cockroach, Lines: 34, Source: batch.go


Example 5: next

// next gives the left boundary of the union of all requests which don't
// affect keys less than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'proto'.
func next(ba roachpb.BatchRequest, k roachpb.RKey) (roachpb.RKey, error) {
	candidate := roachpb.RKeyMax
	for _, union := range ba.Requests {
		h := union.GetInner().Header()
		addr, err := keys.Addr(h.Key)
		if err != nil {
			return nil, err
		}
		if addr.Less(k) {
			eAddr, err := keys.AddrUpperBound(h.EndKey)
			if err != nil {
				return nil, err
			}
			if k.Less(eAddr) {
				// Starts below k, but continues beyond. Need to stay at k.
				return k, nil
			}
			// Affects only [KeyMin,k).
			continue
		}
		// We want the smallest of the surviving candidates.
		if addr.Less(candidate) {
			candidate = addr
		}
	}
	return candidate, nil
}
Developer: CubeLite, Project: cockroach, Lines: 31, Source: batch.go
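
As a hypothetical illustration (not part of the CockroachDB sources), the sketch below shows how a function like next might be called, written as if it lived in the same package as Example 5. BatchRequest.Add, roachpb.NewScan, and roachpb.NewGet are assumed to behave as in the vintage of these examples.

// Hypothetical call site for next() from Example 5, assuming the same package
// and the same-era roachpb helpers.
func exampleNext() (roachpb.RKey, error) {
	var ba roachpb.BatchRequest
	ba.Add(roachpb.NewScan(roachpb.Key("a"), roachpb.Key("c"), 0)) // affects [a, c)
	ba.Add(roachpb.NewGet(roachpb.Key("e")))                       // affects only "e"

	// Everything below "c" has already been handled; the next chunk of the
	// batch starts at the smallest remaining request key, which is "e" here:
	// the Scan ends at "c" and so is skipped, while the Get survives.
	return next(ba, roachpb.RKey("c"))
}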


Example 6: checkEndTransactionTrigger

// checkEndTransactionTrigger verifies that an EndTransactionRequest
// that includes intents for the SystemDB keys sets the proper trigger.
func checkEndTransactionTrigger(req roachpb.Request, _ roachpb.Header) error {
	args, ok := req.(*roachpb.EndTransactionRequest)
	if !ok {
		return nil
	}

	if !args.Commit {
		// This is a rollback: skip trigger verification.
		return nil
	}

	modifiedSpanTrigger := args.InternalCommitTrigger.GetModifiedSpanTrigger()
	modifiedSystemSpan := modifiedSpanTrigger != nil && modifiedSpanTrigger.SystemDBSpan

	var hasSystemKey bool
	for _, span := range args.IntentSpans {
		addr := keys.Addr(span.Key)
		if bytes.Compare(addr, keys.SystemDBSpan.Key) >= 0 && bytes.Compare(addr, keys.SystemDBSpan.EndKey) < 0 {
			hasSystemKey = true
			break
		}
	}
	if hasSystemKey != modifiedSystemSpan {
		return util.Errorf("EndTransaction hasSystemKey=%t, but hasSystemDBTrigger=%t",
			hasSystemKey, modifiedSystemSpan)
	}

	return nil
}
Developer: mbertschler, Project: cockroach, Lines: 31, Source: main_test.go


Example 7: TestKeyAddress

func TestKeyAddress(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		key roachpb.Key
	}{
		{MakeNameMetadataKey(0, "BAR")},
		{MakeNameMetadataKey(1, "BAR")},
		{MakeNameMetadataKey(1, "foo")},
		{MakeNameMetadataKey(2, "foo")},
		{MakeDescMetadataKey(123)},
		{MakeDescMetadataKey(124)},
	}
	var lastKey roachpb.Key
	for i, test := range testCases {
		resultAddr, err := keys.Addr(test.key)
		if err != nil {
			t.Fatal(err)
		}
		result := resultAddr.AsRawKey()
		if result.Compare(lastKey) <= 0 {
			t.Errorf("%d: key address %q is <= %q", i, result, lastKey)
		}
		lastKey = result
	}
}
Developer: yangxuanjia, Project: cockroach, Lines: 25, Source: keys_test.go


Example 8: TestObjectIDForKey

func TestObjectIDForKey(t *testing.T) {
	defer leaktest.AfterTest(t)

	testCases := []struct {
		key     roachpb.RKey
		success bool
		id      uint32
	}{
		// Before the structured span.
		{roachpb.RKeyMin, false, 0},
		{keys.Addr(keys.SystemMax), false, 0},

		// Boundaries of structured span.
		{keys.Addr(keys.TableDataPrefix), false, 0},
		{roachpb.RKeyMax, false, 0},

		// In system span, but no Uvarint ID.
		{keys.MakeKey(keys.TableDataPrefix, roachpb.RKey("foo")), false, 0},

		// Valid, even if there are things after the ID.
		{keys.MakeKey(keys.MakeTablePrefix(42), roachpb.RKey("foo")), true, 42},
		{keys.MakeTablePrefix(0), true, 0},
		{keys.MakeTablePrefix(999), true, 999},
	}

	for tcNum, tc := range testCases {
		id, success := config.ObjectIDForKey(tc.key)
		if success != tc.success {
			t.Errorf("#%d: expected success=%t", tcNum, tc.success)
			continue
		}
		if id != tc.id {
			t.Errorf("#%d: expected id=%d, got %d", tcNum, tc.id, id)
		}
	}
}
Developer: mbertschler, Project: cockroach, Lines: 36, Source: config_test.go


Example 9: SplitRange

// SplitRange splits the range containing splitKey.
// The right range created by the split starts at the split key and extends to the
// original range's end key.
// Returns the new descriptors of the left and right ranges.
//
// splitKey must correspond to a SQL table key (it must end with a family ID /
// col ID).
func (tc *TestCluster) SplitRange(
	splitKey roachpb.Key,
) (*roachpb.RangeDescriptor, *roachpb.RangeDescriptor, error) {
	splitRKey, err := keys.Addr(splitKey)
	if err != nil {
		return nil, nil, err
	}
	origRangeDesc, err := tc.LookupRange(splitKey)
	if err != nil {
		return nil, nil, err
	}
	if origRangeDesc.StartKey.Equal(splitRKey) {
		return nil, nil, errors.Errorf(
			"cannot split range %+v at start key %q", origRangeDesc, splitKey)
	}
	splitReq := roachpb.AdminSplitRequest{
		Span: roachpb.Span{
			Key: splitKey,
		},
		SplitKey: splitKey,
	}
	_, pErr := client.SendWrapped(tc.Servers[0].GetDistSender(), nil, &splitReq)
	if pErr != nil {
		return nil, nil, errors.Errorf(
			"%q: split unexpected error: %s", splitReq.SplitKey, pErr)
	}

	leftRangeDesc := new(roachpb.RangeDescriptor)
	rightRangeDesc := new(roachpb.RangeDescriptor)
	if err := tc.Servers[0].DB().GetProto(
		keys.RangeDescriptorKey(origRangeDesc.StartKey), leftRangeDesc); err != nil {
		return nil, nil, errors.Wrap(err, "could not look up left-hand side descriptor")
	}
	// The split point might not be exactly the one we requested (it can be
	// adjusted slightly so we don't split in the middle of SQL rows). Update it
	// to the real point.
	splitRKey = leftRangeDesc.EndKey
	if err := tc.Servers[0].DB().GetProto(
		keys.RangeDescriptorKey(splitRKey), rightRangeDesc); err != nil {
		return nil, nil, errors.Wrap(err, "could not look up right-hand side descriptor")
	}
	return leftRangeDesc, rightRangeDesc, nil
}
Developer: yangxuanjia, Project: cockroach, Lines: 50, Source: testcluster.go


Example 10: checkEndTransactionTrigger

// checkEndTransactionTrigger verifies that an EndTransactionRequest
// that includes intents for the SystemDB keys sets the proper trigger.
func checkEndTransactionTrigger(args storagebase.FilterArgs) *roachpb.Error {
	req, ok := args.Req.(*roachpb.EndTransactionRequest)
	if !ok {
		return nil
	}

	if !req.Commit {
		// This is a rollback: skip trigger verification.
		return nil
	}

	modifiedSpanTrigger := req.InternalCommitTrigger.GetModifiedSpanTrigger()
	modifiedSystemConfigSpan := modifiedSpanTrigger != nil && modifiedSpanTrigger.SystemConfigSpan

	var hasSystemKey bool
	for _, span := range req.IntentSpans {
		keyAddr, err := keys.Addr(span.Key)
		if err != nil {
			return roachpb.NewError(err)
		}
		if bytes.Compare(keyAddr, keys.SystemConfigSpan.Key) >= 0 &&
			bytes.Compare(keyAddr, keys.SystemConfigSpan.EndKey) < 0 {
			hasSystemKey = true
			break
		}
	}
	// If the transaction in question has intents in the system span, then
	// modifiedSystemConfigSpan should always be true. However, it is possible
	// for modifiedSystemConfigSpan to be set, even though no system keys are
	// present. This can occur with certain conditional DDL statements (e.g.
	// "CREATE TABLE IF NOT EXISTS"), which set the SystemConfigTrigger
	// aggressively but may not actually end up changing the system DB depending
	// on the current state.
	// For more information, see the related comment at the beginning of
	// planner.makePlan().
	if hasSystemKey && !modifiedSystemConfigSpan {
		return roachpb.NewError(util.Errorf("EndTransaction hasSystemKey=%t, but hasSystemConfigTrigger=%t",
			hasSystemKey, modifiedSystemConfigSpan))
	}

	return nil
}
Developer: JKhawaja, Project: cockroach, Lines: 44, Source: main_test.go


Example 11: runLsRanges

func runLsRanges(cmd *cobra.Command, args []string) {
	if len(args) > 1 {
		mustUsage(cmd)
		return
	}

	var startKey roachpb.Key
	{
		k := roachpb.KeyMin.Next()
		if len(args) > 0 {
			k = roachpb.Key(args[0])
		}
		rk, err := keys.Addr(k)
		if err != nil {
			panic(err)
		}
		startKey = keys.RangeMetaKey(rk)
	}
	endKey := keys.Meta2Prefix.PrefixEnd()

	kvDB, stopper := makeDBClient()
	defer stopper.Stop()
	rows, err := kvDB.Scan(startKey, endKey, maxResults)
	if err != nil {
		panicf("scan failed: %s\n", err)
	}

	for _, row := range rows {
		desc := &roachpb.RangeDescriptor{}
		if err := row.ValueProto(desc); err != nil {
			panicf("%s: unable to unmarshal range descriptor\n", row.Key)
			continue
		}
		fmt.Printf("%s-%s [%d]\n", desc.StartKey, desc.EndKey, desc.RangeID)
		for i, replica := range desc.Replicas {
			fmt.Printf("\t%d: node-id=%d store-id=%d\n",
				i, replica.NodeID, replica.StoreID)
		}
	}
	fmt.Printf("%d result(s)\n", len(rows))
}
Developer: GitGoldie, Project: cockroach, Lines: 41, Source: range.go


Example 12: doLookupWithToken

func doLookupWithToken(
	t *testing.T,
	rc *rangeDescriptorCache,
	key string,
	evictToken *evictionToken,
	considerIntents bool,
	useReverseScan bool,
	wg *sync.WaitGroup,
) (*roachpb.RangeDescriptor, *evictionToken) {
	r, returnToken, pErr := rc.lookupRangeDescriptorInternal(
		context.Background(), roachpb.RKey(key), evictToken, considerIntents, useReverseScan, wg)
	if pErr != nil {
		t.Fatalf("Unexpected error from LookupRangeDescriptor: %s", pErr)
	}
	keyAddr, err := keys.Addr(roachpb.Key(key))
	if err != nil {
		t.Fatal(err)
	}
	if (useReverseScan && !r.ContainsExclusiveEndKey(keyAddr)) || (!useReverseScan && !r.ContainsKey(keyAddr)) {
		t.Fatalf("Returned range did not contain key: %s-%s, %s", r.StartKey, r.EndKey, key)
	}
	return r, returnToken
}
Developer: yangxuanjia, Project: cockroach, Lines: 23, Source: range_cache_test.go


Example 13: TestStoreRangeSplitIdempotency

// TestStoreRangeSplitIdempotency executes a split of a range and verifies that the
// resulting ranges respond to the right key ranges and that their stats
// and sequence cache have been properly accounted for.
func TestStoreRangeSplitIdempotency(t *testing.T) {
	defer leaktest.AfterTest(t)
	store, stopper := createTestStore(t)
	defer stopper.Stop()
	rangeID := roachpb.RangeID(1)
	splitKey := roachpb.Key("m")
	content := roachpb.Key("asdvb")

	// First, write some values left and right of the proposed split key.
	pArgs := putArgs([]byte("c"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
		t.Fatal(err)
	}
	pArgs = putArgs([]byte("x"), content)
	if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
		t.Fatal(err)
	}

	// Increments are a good way of testing the sequence cache. Up here, we
	// address them to the original range, then later to the one that contains
	// the key.
	txn := roachpb.NewTransaction("test", []byte("c"), 10, roachpb.SERIALIZABLE,
		store.Clock().Now(), 0)
	lIncArgs := incrementArgs([]byte("apoptosis"), 100)
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Txn: txn,
	}, &lIncArgs); err != nil {
		t.Fatal(err)
	}
	rIncArgs := incrementArgs([]byte("wobble"), 10)
	txn.Sequence++
	if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Txn: txn,
	}, &rIncArgs); err != nil {
		t.Fatal(err)
	}

	// Get the original stats for key and value bytes.
	var ms engine.MVCCStats
	if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
		t.Fatal(err)
	}
	keyBytes, valBytes := ms.KeyBytes, ms.ValBytes

	// Split the range.
	args := adminSplitArgs(roachpb.KeyMin, splitKey)
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		t.Fatal(err)
	}

	// Verify no intents remains on range descriptor keys.
	for _, key := range []roachpb.Key{keys.RangeDescriptorKey(roachpb.RKeyMin), keys.RangeDescriptorKey(keys.Addr(splitKey))} {
		if _, _, err := engine.MVCCGet(store.Engine(), key, store.Clock().Now(), true, nil); err != nil {
			t.Fatal(err)
		}
	}

	rng := store.LookupReplica(roachpb.RKeyMin, nil)
	newRng := store.LookupReplica([]byte("m"), nil)
	if !bytes.Equal(newRng.Desc().StartKey, splitKey) || !bytes.Equal(splitKey, rng.Desc().EndKey) {
		t.Errorf("ranges mismatched, wanted %q=%q=%q", newRng.Desc().StartKey, splitKey, rng.Desc().EndKey)
	}
	if !bytes.Equal(newRng.Desc().EndKey, roachpb.RKeyMax) || !bytes.Equal(rng.Desc().StartKey, roachpb.RKeyMin) {
		t.Errorf("new ranges do not cover KeyMin-KeyMax, but only %q-%q", rng.Desc().StartKey, newRng.Desc().EndKey)
	}

	// Try to get values from both left and right of where the split happened.
	gArgs := getArgs([]byte("c"))
	if reply, err := client.SendWrapped(rg1(store), nil, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}
	gArgs = getArgs([]byte("x"))
	if reply, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		RangeID: newRng.Desc().RangeID,
	}, &gArgs); err != nil {
		t.Fatal(err)
	} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(replyBytes, content) {
		t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
	}

	// Send out an increment request copied from above (same txn/sequence)
	// which remains in the old range.
	_, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
		Txn: txn,
	}, &lIncArgs)
	if _, ok := err.(*roachpb.TransactionRetryError); !ok {
		t.Fatalf("unexpected sequence cache miss: %v", err)
	}

	// Send out the same increment copied from above (same txn/sequence), but
	// now to the newly created range (which should hold that key).
//......... part of the code omitted here .........
Developer: kaustubhkurve, Project: cockroach, Lines: 101, Source: client_split_test.go


Example 14: meta

func meta(k roachpb.RKey) roachpb.RKey {
	return keys.Addr(keys.RangeMetaKey(k))
}
Developer: kaustubhkurve, Project: cockroach, Lines: 3, Source: range_cache.go


Example 15: TestAcceptsUnsplitRanges

// TestAcceptsUnsplitRanges verifies that ranges that need to split are properly
// rejected when the queue has 'acceptsUnsplitRanges = false'.
func TestAcceptsUnsplitRanges(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, stopper := gossipForTest(t)
	defer stopper.Stop()

	// This range can never be split due to zone configs boundaries.
	neverSplits := &Replica{}
	if err := neverSplits.setDesc(&roachpb.RangeDescriptor{
		RangeID:  1,
		StartKey: roachpb.RKeyMin,
		EndKey:   keys.Addr(keys.UserTableDataMin),
	}); err != nil {
		t.Fatal(err)
	}

	// This range will need to be split after user db/table entries are created.
	willSplit := &Replica{}
	if err := willSplit.setDesc(&roachpb.RangeDescriptor{
		RangeID:  2,
		StartKey: keys.Addr(keys.UserTableDataMin),
		EndKey:   roachpb.RKeyMax,
	}); err != nil {
		t.Fatal(err)
	}

	var queued int32
	testQueue := &testQueueImpl{
		shouldQueueFn: func(now roachpb.Timestamp, r *Replica) (shouldQueue bool, priority float64) {
			// Always queue ranges if they make it past the base queue's logic.
			atomic.AddInt32(&queued, 1)
			return true, float64(r.Desc().RangeID)
		},
		acceptUnsplit: false,
	}

	bq := makeBaseQueue("test", testQueue, g, 2)
	mc := hlc.NewManualClock(0)
	clock := hlc.NewClock(mc.UnixNano)
	bq.Start(clock, stopper)

	// Check our config.
	sysCfg := g.GetSystemConfig()
	if sysCfg == nil {
		t.Fatal("nil config")
	}
	if sysCfg.NeedsSplit(neverSplits.Desc().StartKey, neverSplits.Desc().EndKey) {
		t.Fatal("System config says range needs to be split")
	}
	if sysCfg.NeedsSplit(willSplit.Desc().StartKey, willSplit.Desc().EndKey) {
		t.Fatal("System config says range needs to be split")
	}

	// There are no user db/table entries, everything should be added and
	// processed as usual.
	bq.MaybeAdd(neverSplits, roachpb.ZeroTimestamp)
	bq.MaybeAdd(willSplit, roachpb.ZeroTimestamp)

	if err := util.IsTrueWithin(func() bool {
		return atomic.LoadInt32(&testQueue.processed) == 2
	}, 250*time.Millisecond); err != nil {
		t.Error(err)
	}

	if pc := atomic.LoadInt32(&queued); pc != 2 {
		t.Errorf("expected queued count of 2; got %d", pc)
	}

	// Now add a user object, it will trigger a split.
	// The range willSplit starts at the beginning of the user data range,
	// which means keys.MaxReservedDescID+1.
	config.TestingSetZoneConfig(keys.MaxReservedDescID+2, &config.ZoneConfig{RangeMaxBytes: 1 << 20})

	// Check our config.
	if sysCfg.NeedsSplit(neverSplits.Desc().StartKey, neverSplits.Desc().EndKey) {
		t.Fatal("System config says range needs to be split")
	}
	if !sysCfg.NeedsSplit(willSplit.Desc().StartKey, willSplit.Desc().EndKey) {
		t.Fatal("System config says range does not need to be split")
	}

	bq.MaybeAdd(neverSplits, roachpb.ZeroTimestamp)
	bq.MaybeAdd(willSplit, roachpb.ZeroTimestamp)

	if err := util.IsTrueWithin(func() bool {
		return atomic.LoadInt32(&testQueue.processed) == 3
	}, 250*time.Millisecond); err != nil {
		t.Error(err)
	}

	if pc := atomic.LoadInt32(&queued); pc != 3 {
		t.Errorf("expected queued count of 3; got %d", pc)
	}
}
Developer: waldonhendricks, Project: cockroach, Lines: 95, Source: queue_test.go


Example 16: TestGetZoneConfig

// TestGetZoneConfig exercises config.GetZoneConfig and the sql hook for it.
func TestGetZoneConfig(t *testing.T) {
	defer leaktest.AfterTest(t)
	// Disable splitting. We're using bad attributes in zone configs
	// to be able to match.
	defer config.TestingDisableTableSplits()()
	s, sqlDB, _ := setup(t)
	defer cleanup(s, sqlDB)

	expectedCounter := uint32(keys.MaxReservedDescID + 1)

	// Naming scheme for database and tables:
	// db1 has tables tb11 and tb12
	// db2 has tables tb21 and tb22

	db1 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE DATABASE db1`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++

	db2 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE DATABASE db2`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++

	tb11 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db1.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++

	tb12 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db1.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++

	tb21 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db2.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++

	tb22 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db2.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}
	expectedCounter++

	cfg, err := forceNewConfig(t, s)
	if err != nil {
		t.Fatalf("failed to get latest system config: %s", err)
	}

	// We have no custom zone configs.
	testCases := []struct {
		key     roachpb.RKey
		zoneCfg config.ZoneConfig
	}{
		{roachpb.RKeyMin, *config.DefaultZoneConfig},
		{keys.Addr(keys.TableDataPrefix), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(1), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(keys.MaxReservedDescID), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(db1), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(db2), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(tb11), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(tb12), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(tb21), *config.DefaultZoneConfig},
		{keys.MakeTablePrefix(tb22), *config.DefaultZoneConfig},
	}

	for tcNum, tc := range testCases {
		zoneCfg, err := cfg.GetZoneConfigForKey(tc.key)
		if err != nil {
			t.Fatalf("#%d: err=%s", tcNum, err)
		}

		if !reflect.DeepEqual(*zoneCfg, tc.zoneCfg) {
			t.Errorf("#%d: bad zone config.\nexpected: %+v\ngot: %+v", tcNum, tc.zoneCfg, zoneCfg)
		}
	}

	// Now set some zone configs. We don't have a nice way of using table
	// names for this, so we do raw puts.
	// Here is the list of dbs/tables and whether they have a custom zone config:
	// db1: true
	//   tb1: true
	//   tb2: false
	// db2: false
	//   tb1: true
	//   tb2: false
	db1Cfg := config.ZoneConfig{ReplicaAttrs: []roachpb.Attributes{{[]string{"db1"}}}}
	tb11Cfg := config.ZoneConfig{ReplicaAttrs: []roachpb.Attributes{{[]string{"db1.tb1"}}}}
	tb21Cfg := config.ZoneConfig{ReplicaAttrs: []roachpb.Attributes{{[]string{"db2.tb1"}}}}
	for objID, objZone := range map[uint32]config.ZoneConfig{
		db1:  db1Cfg,
		tb11: tb11Cfg,
		tb21: tb21Cfg,
//......... part of the code omitted here .........
Developer: mbertschler, Project: cockroach, Lines: 101, Source: config_test.go


Example 17: TestMultiRangeMergeStaleDescriptor

// TestMultiRangeMergeStaleDescriptor simulates the situation in which the
// DistSender executes a multi-range scan which encounters the stale descriptor
// of a range which has since incorporated its right neighbor by means of a
// merge. It is verified that the DistSender scans the correct keyrange exactly
// once.
func TestMultiRangeMergeStaleDescriptor(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	// Assume we have two ranges, [a-b) and [b-KeyMax).
	merged := false
	// The stale first range descriptor which is unaware of the merge.
	var FirstRange = roachpb.RangeDescriptor{
		RangeID:  1,
		StartKey: roachpb.RKey("a"),
		EndKey:   roachpb.RKey("b"),
		Replicas: []roachpb.ReplicaDescriptor{
			{
				NodeID:  1,
				StoreID: 1,
			},
		},
	}
	// The merged descriptor, which will be looked up after having processed
	// the stale range [a,b).
	var mergedRange = roachpb.RangeDescriptor{
		RangeID:  1,
		StartKey: roachpb.RKey("a"),
		EndKey:   roachpb.RKeyMax,
		Replicas: []roachpb.ReplicaDescriptor{
			{
				NodeID:  1,
				StoreID: 1,
			},
		},
	}
	// Assume we have two key-value pairs, a=1 and c=2.
	existingKVs := []roachpb.KeyValue{
		{Key: roachpb.Key("a"), Value: roachpb.MakeValueFromString("1")},
		{Key: roachpb.Key("c"), Value: roachpb.MakeValueFromString("2")},
	}
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr, getArgs func(addr net.Addr) proto.Message, getReply func() proto.Message, _ *rpc.Context) ([]proto.Message, error) {
		if method != "Node.Batch" {
			t.Fatalf("unexpected method:%s", method)
		}
		ba := getArgs(testAddress).(*roachpb.BatchRequest)
		rs := keys.Range(*ba)
		batchReply := getReply().(*roachpb.BatchResponse)
		reply := &roachpb.ScanResponse{}
		batchReply.Add(reply)
		results := []roachpb.KeyValue{}
		for _, curKV := range existingKVs {
			if rs.Key.Less(keys.Addr(curKV.Key).Next()) && keys.Addr(curKV.Key).Less(rs.EndKey) {
				results = append(results, curKV)
			}
		}
		reply.Rows = results
		return []proto.Message{batchReply}, nil
	}
	ctx := &DistSenderContext{
		RPCSend: testFn,
		RangeDescriptorDB: mockRangeDescriptorDB(func(key roachpb.RKey, _, _ bool) ([]roachpb.RangeDescriptor, *roachpb.Error) {
			if !merged {
				// Assume a range merge operation happened.
				merged = true
				return []roachpb.RangeDescriptor{FirstRange}, nil
			}
			return []roachpb.RangeDescriptor{mergedRange}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("d"), 10).(*roachpb.ScanRequest)
	// Set the Txn info to avoid an OpRequiresTxnError.
	reply, err := client.SendWrappedWith(ds, nil, roachpb.Header{
		Txn: &roachpb.Transaction{},
	}, scan)
	if err != nil {
		t.Fatalf("scan encountered error: %s", err)
	}
	sr := reply.(*roachpb.ScanResponse)
	if !reflect.DeepEqual(existingKVs, sr.Rows) {
		t.Fatalf("expect get %v, actual get %v", existingKVs, sr.Rows)
	}
}
Developer: welfeng2016, Project: cockroach, Lines: 84, Source: dist_sender_test.go


Example 18: TestStoreRangeDownReplicate

// TestStoreRangeDownReplicate verifies that the replication queue will notice
// over-replicated ranges and remove replicas from them.
func TestStoreRangeDownReplicate(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 5)
	defer mtc.Stop()
	store0 := mtc.stores[0]

	// Split off a range from the initial range for testing; there are
	// complications if the metadata ranges are removed from store 1, this
	// simplifies the test.
	splitKey := roachpb.Key("m")
	rightKey := roachpb.Key("z")
	{
		replica := store0.LookupReplica(roachpb.RKeyMin, nil)
		mtc.replicateRange(replica.Desc().RangeID, 0, 1, 2)
		desc := replica.Desc()
		splitArgs := adminSplitArgs(splitKey, splitKey)
		if _, err := replica.AdminSplit(splitArgs, desc); err != nil {
			t.Fatal(err)
		}
	}
	// Replicate the new range to all five stores.
	replica := store0.LookupReplica(keys.Addr(rightKey), nil)
	desc := replica.Desc()
	mtc.replicateRange(desc.RangeID, 0, 3, 4)

	// Initialize the gossip network.
	var wg sync.WaitGroup
	wg.Add(len(mtc.stores))
	key := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
	mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ roachpb.Value) { wg.Done() })
	for _, s := range mtc.stores {
		s.GossipStore()
	}
	wg.Wait()

	// storeIDset is used to compare the replica sets from different views (i.e.
	// local range descriptors)
	type storeIDset map[roachpb.StoreID]struct{}
	makeStoreIDset := func(replicas []roachpb.ReplicaDescriptor) storeIDset {
		idSet := make(storeIDset)
		for _, r := range replicas {
			idSet[r.StoreID] = struct{}{}
		}
		return idSet
	}

	// Function to see if the replication level of the new range has reached the
	// expected equilibrium. If equilibrium has not been reached, this function
	// returns the list of stores that *should* have a replica for the range.
	checkReplication := func() (bool, storeIDset) {
		// Query each store for a replica of the range, generating a real map of
		// the replicas.
		foundIDset := make(storeIDset)
		foundLocalRangeDescs := make([]*roachpb.RangeDescriptor, 0, len(mtc.stores))
		for _, s := range mtc.stores {
			r := s.LookupReplica(keys.Addr(splitKey), nil)
			if r != nil {
				foundLocalRangeDescs = append(foundLocalRangeDescs, r.Desc())
				foundIDset[s.StoreID()] = struct{}{}
			}
		}

		// Fail immediately if there are less than three replicas.
		replicaCount := len(foundIDset)
		if replicaCount < 3 {
			t.Fatalf("Removed too many replicas; expected at least three replicas, found %d", replicaCount)
		}

		// Look up the official range descriptor, make sure it agrees with the
		// found replicas.
		realRangeDesc := getRangeMetadata(keys.Addr(rightKey), mtc, t)
		realIDset := makeStoreIDset(realRangeDesc.Replicas)
		if !reflect.DeepEqual(realIDset, foundIDset) {
			return false, realIDset
		}

		// Ensure the local range descriptors everywhere agree with reality.
		for _, desc := range foundLocalRangeDescs {
			localIDset := makeStoreIDset(desc.Replicas)
			if !reflect.DeepEqual(localIDset, foundIDset) {
				return false, realIDset
			}
		}

		// If we have only three replicas, exit the loop.
		if replicaCount == 3 {
			return true, nil
		}
		return false, foundIDset
	}

	maxTimeout := time.After(10 * time.Second)
	succeeded := false
	for !succeeded {
		select {
		case <-maxTimeout:
			t.Fatalf("Failed to achieve proper replication within 10 seconds")
		case <-time.After(10 * time.Millisecond):
//......... part of the code omitted here .........
Developer: senseb, Project: cockroach, Lines: 101, Source: client_raft_test.go


Example 19: truncate

// truncate restricts all contained requests to the given key range
// and returns a new BatchRequest.
// All requests contained in that batch are "truncated" to the given
// span, inserting NoopRequest appropriately to replace requests which
// are left without a key range to operate on. The number of non-noop
// requests after truncation is returned.
func truncate(ba roachpb.BatchRequest, rs roachpb.RSpan) (roachpb.BatchRequest, int, error) {
	truncateOne := func(args roachpb.Request) (bool, roachpb.Span, error) {
		if _, ok := args.(*roachpb.NoopRequest); ok {
			return true, emptySpan, nil
		}
		header := *args.Header()
		if !roachpb.IsRange(args) {
			// This is a point request.
			if len(header.EndKey) > 0 {
				return false, emptySpan, util.Errorf("%T is not a range command, but EndKey is set", args)
			}
			if !rs.ContainsKey(keys.Addr(header.Key)) {
				return false, emptySpan, nil
			}
			return true, header, nil
		}
		// We're dealing with a range-spanning request.
		keyAddr, endKeyAddr := keys.Addr(header.Key), keys.Addr(header.EndKey)
		if l, r := !keyAddr.Equal(header.Key), !endKeyAddr.Equal(header.EndKey); l || r {
			if !rs.ContainsKeyRange(keyAddr, endKeyAddr) {
				return false, emptySpan, util.Errorf("local key range must not span ranges")
			}
			if !l || !r {
				return false, emptySpan, util.Errorf("local key mixed with global key in range")
			}
			// Range-local local key range.
			return true, header, nil
		}
		// Below, {end,}keyAddr equals header.{End,}Key, so nothing is local.
		if keyAddr.Less(rs.Key) {
			header.Key = rs.Key.AsRawKey() // "key" can't be local
			keyAddr = rs.Key
		}
		if !endKeyAddr.Less(rs.EndKey) {
			header.EndKey = rs.EndKey.AsRawKey() // "endKey" can't be local
			endKeyAddr = rs.EndKey
		}
		// Check whether the truncation has left any keys in the range. If not,
		// we need to cut it out of the request.
		if !keyAddr.Less(endKeyAddr) {
			return false, emptySpan, nil
		}
		return true, header, nil
	}

	var numNoop int
	origRequests := ba.Requests
	ba.Requests = make([]roachpb.RequestUnion, len(ba.Requests))
	for pos, arg := range origRequests {
		hasRequest, newHeader, err := truncateOne(arg.GetInner())
		if !hasRequest {
			// We omit this one, i.e. replace it with a Noop.
			numNoop++
			nReq := roachpb.RequestUnion{}
			if !nReq.SetValue(&roachpb.NoopRequest{}) {
				panic("RequestUnion excludes NoopRequest")
			}
			ba.Requests[pos] = nReq
		} else {
			// Keep the old one. If we must adjust the header, must copy.
			// TODO(tschottdorf): this could wind up cloning big chunks of data.
			// Can optimize by creating a new Request manually, but with the old
			// data.
			if newHeader.Equal(*origRequests[pos].GetInner().Header()) {
				ba.Requests[pos] = origRequests[pos]
			} else {
				ba.Requests[pos] = *proto.Clone(&origRequests[pos]).(*roachpb.RequestUnion)
				*ba.Requests[pos].GetInner().Header() = newHeader
			}
		}
		if err != nil {
			return roachpb.BatchRequest{}, 0, err
		}
	}
	return ba, len(ba.Requests) - numNoop, nil
}
Developer: kaustubhkurve, Project: cockroach, Lines: 82, Source: batch.go
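
As a hypothetical illustration (not part of the CockroachDB sources), the sketch below shows how truncate might be called from within the same package as Example 19: a batch containing a Scan and a Get is clipped to a range span, and the request that falls entirely outside the span is replaced by a NoopRequest. BatchRequest.Add, roachpb.NewScan, and roachpb.NewGet are assumed to match the vintage of these examples.

// Hypothetical call site for truncate() from Example 19.
func exampleTruncate() error {
	var ba roachpb.BatchRequest
	ba.Add(roachpb.NewScan(roachpb.Key("a"), roachpb.Key("e"), 0)) // affects [a, e)
	ba.Add(roachpb.NewGet(roachpb.Key("a")))                       // affects only "a"

	// Clip the batch to the range span [c, e).
	rs := roachpb.RSpan{Key: roachpb.RKey("c"), EndKey: roachpb.RKey("e")}
	truncBA, numActive, err := truncate(ba, rs)
	if err != nil {
		return err
	}
	// numActive == 1: the Scan is clipped to [c, e) and kept, while the Get
	// at "a" falls outside the span and is replaced by a NoopRequest.
	_, _ = truncBA, numActive
	return nil
}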


Example 20: truncate

// truncate restricts all contained requests to the given key range
// and returns a new BatchRequest.
// All requests contained in that batch are "truncated" to the given
// span, inserting NoopRequest appropriately to replace requests which
// are left without a key range to operate on. The number of non-noop
// requests after truncation is returned.
func truncate(ba roachpb.BatchRequest, rs roachpb.RSpan) (roachpb.BatchRequest, int, error) {
	truncateOne := func(args roachpb.Request) (bool, roachpb.Span, error) {
		if _, ok := args.(*roachpb.NoopRequest); ok {
			return true, emptySpan, nil
		}
		header := args.Header()
		if !roachpb.IsRange(args) {
			// This is a point request.
			if len(header.EndKey) > 0 {
				return false, emptySpan, errors.Errorf("%T is not a range command, but EndKey is set", args)
			}
			keyAddr, err := keys.Addr(header.Key)
			if err != nil {
				return false, emptySpan, err
			}
			if !rs.ContainsKey(keyAddr) {
				return false, emptySpan, nil
			}
			return true, header, nil
		}
		// We're dealing with a range-spanning request.
		local := false
		keyAddr, err := keys.Addr(header.Key)
		if err != nil {
			return false, emptySpan, err
		}
		endKeyAddr, err := keys.Addr(header.EndKey)
		if err != nil {
			return false, emptySpan, err
		}
		if l, r := !keyAddr.Equal(header.Key), !endKeyAddr.Equal(header.EndKey); l || r {
			if !l || !r {
				return false, emptySpan, errors.Errorf("local key mixed with global key in range")
			}
			local = true
		}
		if keyAddr.Less(rs.Key) {
			// rs.Key can't be local because it contains range split points, which
			// are never local.
			if !local {
				header.Key = rs.Key.AsRawKey()
			} else {
				// The local start key should be truncated to the boundary of local keys which
				// address to rs.Key.
				header.Key = keys.MakeRangeKeyPrefix(rs.Key)
			}
		}
		if !endKeyAddr.Less(rs.EndKey) {
			// rs.EndKey can't be local because it contains range split points, which
			// are never local.
			if !local {
				header.EndKey = rs.EndKey.AsRawKey()
			} else {
				// The local end key should be truncated to the boundary of local keys which
				// address to rs.EndKey.
				header.EndKey = keys.MakeRangeKeyPrefix(rs.EndKey)
			}
		}
		// Check whether the truncation has left any keys in the range. If not,
		// we need to cut it out of the request.
		if header.Key.Compare(header.EndKey) >= 0 {
			return false, emptySpan, nil
		}
		return true, header, nil
	}

	var numNoop int
	origRequests := ba.Requests
	ba.Requests = make([]roachpb.RequestUnion, len(ba.Requests))
	for pos, arg := range origRequests {
		hasRequest, newHeader, err := truncateOne(arg.GetInner())
		if !hasRequest {
			// We omit this one, i.e. replace it with a Noop.
			numNoop++
			union := roachpb.RequestUnion{}
			union.MustSetInner(&noopRequest)
			ba.Requests[pos] = union
		} else {
			// Keep the old one. If we must adjust the header, must copy.
//......... part of the code omitted here .........