This page collects typical usage examples of the Golang function MakePrefixPattern from github.com/cockroachdb/cockroach/gossip. If you are unsure what MakePrefixPattern does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.
Below are 20 code examples of MakePrefixPattern, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Golang code examples.
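Before the examples, here is a minimal sketch of the pattern they all share: MakePrefixPattern builds a regular expression that matches every gossip key under a given prefix, and RegisterCallback attaches a callback to that pattern, returning a function that unregisters it. This sketch is not taken from the repository; watchStoreDescriptors is a made-up name, and it assumes a CockroachDB tree of the same era in which the callback receives a roachpb.Value (older trees pass []byte or bool instead, as some of the examples below show).

package example

import (
	"log"

	"github.com/cockroachdb/cockroach/gossip"
	"github.com/cockroachdb/cockroach/roachpb"
)

// watchStoreDescriptors registers a callback that fires whenever any store
// descriptor is gossiped, i.e. for every key under gossip.KeyStorePrefix.
func watchStoreDescriptors(g *gossip.Gossip) func() {
	// MakePrefixPattern turns the key prefix into a regular expression that
	// matches every gossip key starting with that prefix.
	pattern := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
	// RegisterCallback returns an unregister function; callers typically defer it.
	return g.RegisterCallback(pattern, func(key string, content roachpb.Value) {
		var desc roachpb.StoreDescriptor
		if err := content.GetProto(&desc); err != nil {
			log.Printf("could not decode store descriptor under %s: %v", key, err)
			return
		}
		log.Printf("store %d updated: %+v", desc.StoreID, desc)
	})
}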
Example 1: NewStorePool
// NewStorePool creates a StorePool and registers the store updating callback
// with gossip.
func NewStorePool(
g *gossip.Gossip,
clock *hlc.Clock,
rpcContext *rpc.Context,
reservationsEnabled bool,
timeUntilStoreDead time.Duration,
stopper *stop.Stopper,
) *StorePool {
sp := &StorePool{
clock: clock,
timeUntilStoreDead: timeUntilStoreDead,
rpcContext: rpcContext,
reservationsEnabled: reservationsEnabled,
failedReservationsTimeout: envutil.EnvOrDefaultDuration("failed_reservation_timeout",
defaultFailedReservationsTimeout),
declinedReservationsTimeout: envutil.EnvOrDefaultDuration("declined_reservation_timeout",
defaultDeclinedReservationsTimeout),
reserveRPCTimeout: envutil.EnvOrDefaultDuration("reserve_rpc_timeout",
defaultReserveRPCTimeout),
resolver: GossipAddressResolver(g),
}
sp.mu.stores = make(map[roachpb.StoreID]*storeDetail)
heap.Init(&sp.mu.queue)
storeRegex := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
g.RegisterCallback(storeRegex, sp.storeGossipUpdate)
deadReplicasRegex := gossip.MakePrefixPattern(gossip.KeyDeadReplicasPrefix)
g.RegisterCallback(deadReplicasRegex, sp.deadReplicasGossipUpdate)
sp.start(stopper)
return sp
}
Author: yangxuanjia | Project: cockroach | Lines: 33 | Source: store_pool.go
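The constructor above registers sp.storeGossipUpdate and sp.deadReplicasGossipUpdate as gossip callbacks, but their bodies do not appear on this page. The following is a hypothetical sketch of what a store-gossip handler of this shape could look like; the mutex embedded in sp.mu and the desc field on storeDetail are assumptions, not the actual StorePool implementation.

// Hypothetical sketch of a store-gossip callback; not the real storeGossipUpdate.
func (sp *StorePool) storeGossipUpdateSketch(_ string, content roachpb.Value) {
	var storeDesc roachpb.StoreDescriptor
	if err := content.GetProto(&storeDesc); err != nil {
		// The real code would log the decoding failure; the sketch just returns.
		return
	}
	sp.mu.Lock()
	defer sp.mu.Unlock()
	detail, ok := sp.mu.stores[storeDesc.StoreID]
	if !ok {
		detail = &storeDetail{}
		sp.mu.stores[storeDesc.StoreID] = detail
	}
	// Record the freshly gossiped descriptor; "desc" is an assumed field name.
	detail.desc = &storeDesc
}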
Example 2: waitForStores
// waitForStores waits for all of the store descriptors to be gossiped. Servers
// other than the first "bootstrap" their stores asynchronously, but we'd like
// to wait for all of the stores to be initialized before returning the
// TestCluster.
func (tc *TestCluster) waitForStores(t testing.TB) {
// Register a gossip callback for the store descriptors.
g := tc.Servers[0].Gossip()
var storesMu sync.Mutex
stores := map[roachpb.StoreID]struct{}{}
storesDone := make(chan error)
unregister := g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix),
func(_ string, content roachpb.Value) {
var desc roachpb.StoreDescriptor
if err := content.GetProto(&desc); err != nil {
storesDone <- err
return
}
storesMu.Lock()
stores[desc.StoreID] = struct{}{}
if len(stores) == len(tc.Servers) {
close(storesDone)
}
storesMu.Unlock()
})
defer unregister()
// Wait for the store descriptors to be gossiped.
if err := <-storesDone; err != nil {
t.Fatal(err)
}
}
Author: YuleiXiao | Project: cockroach | Lines: 31 | Source: testcluster.go
Example 3: TestStoreRangeReplicate
// TestStoreRangeReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeReplicate(t *testing.T) {
defer leaktest.AfterTest(t)
mtc := multiTestContext{}
mtc.Start(t, 3)
defer mtc.Stop()
// Initialize the gossip network.
var wg sync.WaitGroup
wg.Add(len(mtc.stores))
key := gossip.MakePrefixPattern(gossip.KeyCapacityPrefix)
mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ bool) { wg.Done() })
for _, s := range mtc.stores {
s.GossipCapacity()
}
wg.Wait()
// Once we know our peers, trigger a scan.
mtc.stores[0].ForceReplicationScan(t)
// The range should become available on every node.
if err := util.IsTrueWithin(func() bool {
for _, s := range mtc.stores {
r := s.LookupRange(proto.Key("a"), proto.Key("b"))
if r == nil {
return false
}
}
return true
}, 1*time.Second); err != nil {
t.Fatal(err)
}
}
Author: mingpengxiao | Project: cockroach | Lines: 34 | Source: client_raft_test.go
Example 4: TestStoreRangeUpReplicate
// TestStoreRangeUpReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeUpReplicate(t *testing.T) {
defer leaktest.AfterTest(t)()
mtc := startMultiTestContext(t, 3)
defer mtc.Stop()
// Initialize the gossip network.
var wg sync.WaitGroup
wg.Add(len(mtc.stores))
key := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ roachpb.Value) { wg.Done() })
for _, s := range mtc.stores {
s.GossipStore()
}
wg.Wait()
// Once we know our peers, trigger a scan.
mtc.stores[0].ForceReplicationScanAndProcess()
// The range should become available on every node.
util.SucceedsSoon(t, func() error {
for _, s := range mtc.stores {
r := s.LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
if r == nil {
return util.Errorf("expected replica for keys \"a\" - \"b\"")
}
}
return nil
})
}
Author: liugangnhm | Project: cockroach | Lines: 31 | Source: client_raft_test.go
Example 5: TestStoreRangeUpReplicate
// TestStoreRangeUpReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeUpReplicate(t *testing.T) {
defer leaktest.AfterTest(t)
mtc := startMultiTestContext(t, 3)
defer mtc.Stop()
// Initialize the gossip network.
var wg sync.WaitGroup
wg.Add(len(mtc.stores))
key := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ roachpb.Value) { wg.Done() })
for _, s := range mtc.stores {
s.GossipStore()
}
wg.Wait()
// Once we know our peers, trigger a scan.
mtc.stores[0].ForceReplicationScanAndProcess()
// The range should become available on every node.
if err := util.IsTrueWithin(func() bool {
for _, s := range mtc.stores {
r := s.LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
if r == nil {
return false
}
}
return true
}, replicationTimeout); err != nil {
t.Fatal(err)
}
}
Author: harryge00 | Project: cockroach | Lines: 33 | Source: client_raft_test.go
Example 6: newStoreGossiper
func newStoreGossiper(g *gossip.Gossip) *storeGossiper {
sg := &storeGossiper{
g: g,
}
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ []byte) { sg.wg.Done() })
return sg
}
Author: Eric-Gaudiello | Project: cockroach | Lines: 7 | Source: allocator_test.go
Example 7: newReplicateQueue
// newReplicateQueue returns a new instance of replicateQueue.
func newReplicateQueue(store *Store, g *gossip.Gossip, allocator Allocator, clock *hlc.Clock,
options AllocatorOptions) *replicateQueue {
rq := &replicateQueue{
allocator: allocator,
clock: clock,
updateChan: make(chan struct{}, 1),
}
rq.baseQueue = makeBaseQueue("replicate", rq, store, g, queueConfig{
maxSize: replicateQueueMaxSize,
needsLease: true,
acceptsUnsplitRanges: false,
})
if g != nil { // gossip is nil for some unit tests
// Register a gossip callback to signal queue that replicas in
// purgatory might be retried due to new store gossip.
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ roachpb.Value) {
select {
case rq.updateChan <- struct{}{}:
default:
}
})
}
return rq
}
Author: yangxuanjia | Project: cockroach | Lines: 27 | Source: replicate_queue.go
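Example 7 signals rq.updateChan from the gossip callback, but the consumer of that channel is not shown here. Below is a purely illustrative consumer loop; maybeRetryPurgatory is an invented helper, and the sketch assumes the stop.Stopper API of the same era (RunWorker and ShouldStop).

// Hypothetical consumer of rq.updateChan; maybeRetryPurgatory is made up for
// illustration and is not part of the real replicateQueue.
func (rq *replicateQueue) watchForStoreUpdates(stopper *stop.Stopper) {
	stopper.RunWorker(func() {
		for {
			select {
			case <-rq.updateChan:
				// New store information arrived via gossip, so replicas sitting
				// in purgatory may now be placeable again.
				rq.maybeRetryPurgatory()
			case <-stopper.ShouldStop():
				return
			}
		}
	})
}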
Example 8: newAllocator
// newAllocator creates a new allocator using the specified gossip.
func newAllocator(g *gossip.Gossip) *allocator {
a := &allocator{
gossip: g,
randGen: rand.New(rand.NewSource(rand.Int63())),
}
// Callback triggers on any capacity gossip updates.
if a.gossip != nil {
capacityRegex := gossip.MakePrefixPattern(gossip.KeyCapacityPrefix)
a.gossip.RegisterCallback(capacityRegex, a.capacityGossipUpdate)
}
return a
}
Author: huaxling | Project: cockroach | Lines: 13 | Source: allocator.go
Example 9: newStoreGossiper
// newStoreGossiper creates a store gossiper for use by tests. It adds the
// callback to gossip.
func newStoreGossiper(g *gossip.Gossip) *storeGossiper {
sg := &storeGossiper{
g: g,
storeKeyMap: make(map[string]struct{}),
}
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(key string, _ []byte) {
sg.mu.Lock()
defer sg.mu.Unlock()
if _, ok := sg.storeKeyMap[key]; ok {
sg.wg.Done()
}
})
return sg
}
Author: kangxinrong | Project: cockroach | Lines: 16 | Source: allocator_test.go
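The storeGossiper type used in Examples 6 and 9 is a small test helper whose definition does not appear on this page. Below is a hypothetical reconstruction of the struct plus a companion method that gossips descriptors and blocks on the WaitGroup until the callback registered above has observed each key; all field and method names are assumptions, and the sketch presupposes the roachpb-era API (MakeStoreKey, AddInfoProto).

// Hypothetical definition of the test helper; field names are guesses based on
// how the examples above use it.
type storeGossiper struct {
	g           *gossip.Gossip
	mu          sync.Mutex
	wg          sync.WaitGroup
	storeKeyMap map[string]struct{}
}

// gossipStores (hypothetical) gossips each descriptor and waits until the
// callback registered in newStoreGossiper has fired for every key.
func (sg *storeGossiper) gossipStores(stores []*roachpb.StoreDescriptor, t *testing.T) {
	sg.mu.Lock()
	sg.storeKeyMap = map[string]struct{}{}
	sg.wg.Add(len(stores))
	for _, s := range stores {
		sg.storeKeyMap[gossip.MakeStoreKey(s.StoreID)] = struct{}{}
	}
	sg.mu.Unlock()
	for _, s := range stores {
		if err := sg.g.AddInfoProto(gossip.MakeStoreKey(s.StoreID), s, 0); err != nil {
			t.Fatal(err)
		}
	}
	// Block until the callback has fired once per gossiped store key.
	sg.wg.Wait()
}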
Example 10: NewStorePool
// NewStorePool creates a StorePool and registers the store updating callback
// with gossip.
func NewStorePool(g *gossip.Gossip, timeUntilStoreDead time.Duration, stopper *stop.Stopper) *StorePool {
sp := &StorePool{
timeUntilStoreDead: timeUntilStoreDead,
stores: make(map[roachpb.StoreID]*storeDetail),
}
heap.Init(&sp.queue)
storeRegex := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
g.RegisterCallback(storeRegex, sp.storeGossipUpdate)
sp.start(stopper)
return sp
}
Author: rissoa | Project: cockroach | Lines: 16 | Source: store_pool.go
Example 11: NewStoreGossiper
// NewStoreGossiper creates a store gossiper for use by tests. It adds the
// callback to gossip.
func NewStoreGossiper(g *gossip.Gossip) *StoreGossiper {
sg := &StoreGossiper{
g: g,
storeKeyMap: make(map[string]struct{}),
}
sg.cond = sync.NewCond(&sg.mu)
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(key string, _ roachpb.Value) {
sg.mu.Lock()
defer sg.mu.Unlock()
delete(sg.storeKeyMap, key)
sg.cond.Broadcast()
})
return sg
}
Author: yangxuanjia | Project: cockroach | Lines: 16 | Source: store_gossiper.go
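Example 11 replaces the WaitGroup with a condition variable: the callback deletes each observed key from storeKeyMap and broadcasts. A hypothetical companion method that populates the map, gossips the descriptors, and waits until the map drains might look like the following; the method name and locking layout are assumptions, and sg.mu is assumed to be the sync.Mutex the cond was created on.

// Hypothetical companion to NewStoreGossiper; not taken from store_gossiper.go.
func (sg *StoreGossiper) GossipStores(stores []*roachpb.StoreDescriptor, t *testing.T) {
	sg.mu.Lock()
	defer sg.mu.Unlock()
	for _, s := range stores {
		key := gossip.MakeStoreKey(s.StoreID)
		sg.storeKeyMap[key] = struct{}{}
		if err := sg.g.AddInfoProto(key, s, 0); err != nil {
			t.Fatal(err)
		}
	}
	// The callback removes each key it sees and broadcasts; Wait releases sg.mu
	// while blocked, so the callback can make progress.
	for len(sg.storeKeyMap) > 0 {
		sg.cond.Wait()
	}
}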
Example 12: gossipStores
func gossipStores(g *gossip.Gossip, stores []*proto.StoreDescriptor, t *testing.T) {
var wg sync.WaitGroup
wg.Add(len(stores))
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyCapacityPrefix), func(_ string, _ bool) { wg.Done() })
for _, s := range stores {
keyMaxCapacity := gossip.MakeCapacityKey(s.Node.NodeID, s.StoreID)
// Gossip store descriptor.
err := g.AddInfo(keyMaxCapacity, *s, 0)
if err != nil {
t.Fatal(err)
}
}
// Wait for all gossip callbacks to be invoked.
wg.Wait()
}
Author: Hellblazer | Project: cockroach | Lines: 17 | Source: allocator_test.go
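A hypothetical invocation of gossipStores from a test, with made-up descriptors, could look like this; it assumes the gossip instance can be built standalone with gossip.New(nil, 0, nil), as Examples 14 and 15 of the same era do.

// Hypothetical usage; the test name and descriptor values are invented.
func TestGossipStoresUsage(t *testing.T) {
	g := gossip.New(nil, 0, nil)
	stores := []*proto.StoreDescriptor{
		{StoreID: 1, Node: proto.NodeDescriptor{NodeID: 1}},
		{StoreID: 2, Node: proto.NodeDescriptor{NodeID: 2}},
	}
	gossipStores(g, stores, t)
}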
Example 13: Example_rebalancing
func Example_rebalancing() {
stopper := stop.NewStopper()
defer stopper.Stop()
// Model a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
g := gossip.New(nil, nil, stopper)
// Have to call g.SetNodeID before calling g.AddInfo.
g.SetNodeID(roachpb.NodeID(1))
sp := NewStorePool(
g,
hlc.NewClock(hlc.UnixNano),
nil,
/* reservationsEnabled */ true,
TestTimeUntilStoreDeadOff,
stopper,
)
alloc := MakeAllocator(sp, AllocatorOptions{AllowRebalance: true, Deterministic: true})
var wg sync.WaitGroup
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ roachpb.Value) { wg.Done() })
const generations = 100
const nodes = 20
// Initialize testStores.
var testStores [nodes]testStore
for i := 0; i < len(testStores); i++ {
testStores[i].StoreID = roachpb.StoreID(i)
testStores[i].Node = roachpb.NodeDescriptor{NodeID: roachpb.NodeID(i)}
testStores[i].Capacity = roachpb.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
}
// Initialize the cluster with a single range.
testStores[0].add(alloc.randGen.Int63n(1 << 20))
for i := 0; i < generations; i++ {
// First loop through test stores and add data.
wg.Add(len(testStores))
for j := 0; j < len(testStores); j++ {
// Add a pretend range to the testStore if there's already one.
if testStores[j].Capacity.RangeCount > 0 {
testStores[j].add(alloc.randGen.Int63n(1 << 20))
}
if err := g.AddInfoProto(gossip.MakeStoreKey(roachpb.StoreID(j)), &testStores[j].StoreDescriptor, 0); err != nil {
panic(err)
}
}
wg.Wait()
// Next loop through test stores and maybe rebalance.
for j := 0; j < len(testStores); j++ {
ts := &testStores[j]
if alloc.ShouldRebalance(ts.StoreID) {
target := alloc.RebalanceTarget(ts.StoreID, roachpb.Attributes{}, []roachpb.ReplicaDescriptor{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
if target != nil {
testStores[j].rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
}
}
}
// Output store capacities as 3-digit decimal values scaled to the largest store (000-999).
if i%(generations/50) == 0 {
var maxBytes int64
for j := 0; j < len(testStores); j++ {
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
if bytes > maxBytes {
maxBytes = bytes
}
}
if maxBytes > 0 {
for j := 0; j < len(testStores); j++ {
endStr := " "
if j == len(testStores)-1 {
endStr = ""
}
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
}
fmt.Printf("\n")
}
}
}
var totBytes int64
var totRanges int32
for i := 0; i < len(testStores); i++ {
totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
totRanges += testStores[i].Capacity.RangeCount
}
fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 045 140 000 000 000 000 000 105 000 000
// 999 014 143 000 000 000 000 039 017 000 112 071 000 088 009 000 097 134 000 151
// 999 196 213 000 000 000 143 098 210 039 262 260 077 139 078 087 237 316 281 267
//......... (remainder of code omitted) .........
Author: csdigi | Project: cockroach | Lines: 101 | Source: allocator_test.go
Example 14: Example_rebalancing
func Example_rebalancing() {
// Model a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
g := gossip.New(nil, 0, nil)
alloc := newAllocator(g)
alloc.randGen = rand.New(rand.NewSource(0))
alloc.deterministic = true
var wg sync.WaitGroup
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ []byte) { wg.Done() })
const generations = 100
const nodes = 20
// Initialize testStores.
var testStores [nodes]testStore
for i := 0; i < len(testStores); i++ {
testStores[i].StoreID = proto.StoreID(i)
testStores[i].Node = proto.NodeDescriptor{NodeID: proto.NodeID(i)}
testStores[i].Capacity = proto.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
}
// Initialize the cluster with a single range.
testStores[0].Add(alloc.randGen.Int63n(1 << 20))
for i := 0; i < generations; i++ {
// First loop through test stores and add data.
wg.Add(len(testStores))
for j := 0; j < len(testStores); j++ {
// Add a pretend range to the testStore if there's already one.
if testStores[j].Capacity.RangeCount > 0 {
testStores[j].Add(alloc.randGen.Int63n(1 << 20))
}
key := gossip.MakeStoreKey(proto.StoreID(j))
if err := g.AddInfoProto(key, &testStores[j].StoreDescriptor, 0); err != nil {
panic(err)
}
}
wg.Wait()
// Next loop through test stores and maybe rebalance.
for j := 0; j < len(testStores); j++ {
ts := &testStores[j]
if alloc.ShouldRebalance(&testStores[j].StoreDescriptor) {
target := alloc.RebalanceTarget(proto.Attributes{}, []proto.Replica{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
if target != nil {
testStores[j].Rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
}
}
}
// Output store capacities as 3-digit decimal values scaled to the largest store (000-999).
if i%(generations/50) == 0 {
var maxBytes int64
for j := 0; j < len(testStores); j++ {
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
if bytes > maxBytes {
maxBytes = bytes
}
}
if maxBytes > 0 {
for j := 0; j < len(testStores); j++ {
endStr := " "
if j == len(testStores)-1 {
endStr = ""
}
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
}
fmt.Printf("\n")
}
}
}
var totBytes int64
var totRanges int32
for i := 0; i < len(testStores); i++ {
totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
totRanges += testStores[i].Capacity.RangeCount
}
fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 999 000 000 000 000 000 000 739 000 000 000 000 000 000 000 000 000 000 000 000
// 999 107 000 000 204 000 000 375 000 000 000 000 000 000 000 000 000 000 536 000
// 999 310 000 262 872 000 000 208 000 705 000 526 000 000 439 000 000 607 933 000
// 812 258 000 220 999 673 402 480 000 430 516 374 000 431 318 000 551 714 917 000
// 582 625 185 334 720 589 647 619 000 300 483 352 279 502 208 665 816 684 999 374
// 751 617 771 542 738 676 665 525 309 435 612 449 457 616 306 837 993 754 999 445
// 759 659 828 478 693 622 594 591 349 458 630 538 526 613 462 827 879 787 999 550
// 861 658 828 559 801 660 681 560 487 529 652 686 642 716 575 999 989 875 989 581
// 775 647 724 557 779 662 670 494 535 502 681 676 624 695 561 961 999 772 888 592
// 856 712 753 661 767 658 717 606 529 615 755 699 672 700 576 955 999 755 861 671
// 882 735 776 685 844 643 740 578 610 688 787 741 661 767 587 999 955 809 803 731
// 958 716 789 719 861 689 821 608 634 724 800 782 694 799 619 994 999 851 812 818
// 949 726 788 664 873 633 749 599 680 714 790 728 663 842 628 999 978 816 823 791
// 923 698 792 712 816 605 774 651 661 728 802 718 670 819 714 999 966 801 829 791
// 962 779 847 737 900 675 811 691 745 778 835 812 680 894 790 999 989 872 923 799
// 967 812 826 772 891 685 828 683 761 808 864 820 643 873 783 969 999 873 910 781
// 923 813 837 739 867 672 792 664 773 772 879 803 610 845 740 957 999 867 912 732
// 952 803 866 759 881 655 765 668 803 772 929 762 601 844 751 973 999 892 864 731
//......... (remainder of code omitted) .........
Author: Eric-Gaudiello | Project: cockroach | Lines: 101 | Source: allocator_test.go
Example 15: ExampleAllocatorRebalancing
// ExampleAllocatorRebalancing models a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
func ExampleAllocatorRebalancing() {
g := gossip.New(nil, 0, nil)
alloc := newAllocator(g)
alloc.randGen = rand.New(rand.NewSource(0))
alloc.deterministic = true
var wg sync.WaitGroup
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyCapacityPrefix), func(_ string, _ bool) { wg.Done() })
const generations = 100
const nodes = 20
// Initialize testStores.
var testStores [nodes]testStore
for i := 0; i < len(testStores); i++ {
testStores[i].StoreID = proto.StoreID(i)
testStores[i].Node = proto.NodeDescriptor{NodeID: proto.NodeID(i)}
testStores[i].Capacity = proto.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
}
// Initialize the cluster with a single range.
testStores[0].Add(alloc.randGen.Int63n(1 << 20))
for i := 0; i < generations; i++ {
// First loop through test stores and add data.
wg.Add(len(testStores))
for j := 0; j < len(testStores); j++ {
// Add a pretend range to the testStore if there's already one.
if testStores[j].Capacity.RangeCount > 0 {
testStores[j].Add(alloc.randGen.Int63n(1 << 20))
}
key := gossip.MakeCapacityKey(proto.NodeID(j), proto.StoreID(j))
if err := g.AddInfo(key, testStores[j].StoreDescriptor, 0); err != nil {
panic(err)
}
}
wg.Wait()
// Next loop through test stores and maybe rebalance.
for j := 0; j < len(testStores); j++ {
ts := &testStores[j]
if alloc.ShouldRebalance(&testStores[j].StoreDescriptor) {
target := alloc.RebalanceTarget(proto.Attributes{}, []proto.Replica{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
if target != nil {
testStores[j].Rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
}
}
}
// Output store capacities as 3-digit decimal values scaled to the largest store (000-999).
if i%(generations/50) == 0 {
var maxBytes int64
for j := 0; j < len(testStores); j++ {
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
if bytes > maxBytes {
maxBytes = bytes
}
}
if maxBytes > 0 {
for j := 0; j < len(testStores); j++ {
endStr := " "
if j == len(testStores)-1 {
endStr = ""
}
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
}
fmt.Printf("\n")
}
}
}
var totBytes int64
var totRanges int32
for i := 0; i < len(testStores); i++ {
totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
totRanges += testStores[i].Capacity.RangeCount
}
fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 999 000 000 000 000 000 000 739 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 204 000 000 375 000 000 107 000 000 000 000 000 000 000 000 536
// 942 000 000 463 140 000 000 646 000 288 288 000 442 000 058 647 000 000 316 999
// 880 000 412 630 365 745 445 565 122 407 380 570 276 000 271 709 000 718 299 999
// 925 000 667 600 555 975 704 552 272 491 773 890 584 000 407 974 000 930 476 999
// 990 967 793 579 493 999 698 453 616 608 777 755 709 425 455 984 483 698 267 931
// 965 999 869 606 635 908 630 585 567 577 818 870 740 621 550 868 805 790 411 913
// 953 995 990 624 617 947 562 609 670 658 909 952 835 851 641 958 924 999 526 987
// 999 923 901 571 687 915 636 636 674 685 831 881 847 820 702 905 897 983 509 981
// 999 884 809 585 691 826 640 572 748 641 754 887 758 848 643 927 865 897 541 956
// 999 856 891 594 691 745 602 615 766 663 814 834 719 886 733 925 882 911 593 926
// 999 890 900 653 707 759 642 697 771 732 851 858 748 869 842 953 903 928 655 923
// 999 924 909 696 748 797 693 689 806 766 841 902 705 897 874 914 913 916 730 892
// 999 948 892 704 740 821 685 656 859 772 893 911 690 878 824 935 928 941 741 860
// 999 948 931 697 770 782 697 666 893 761 944 869 658 902 816 925 923 983 742 831
// 999 878 901 736 750 737 677 647 869 731 930 825 631 880 775 947 949 930 687 810
// 999 890 910 764 778 757 709 663 849 777 964 837 672 891 814 978 944 946 721 868
// 985 895 968 806 791 791 720 694 883 819 999 847 652 888 790 995 950 947 692 843
//......... (remainder of code omitted) .........
Author: Hellblazer | Project: cockroach | Lines: 101 | Source: allocator_test.go
Example 16: TestStoreRangeDownReplicate
// TestStoreRangeDownReplicate verifies that the replication queue will notice
// over-replicated ranges and remove replicas from them.
func TestStoreRangeDownReplicate(t *testing.T) {
defer leaktest.AfterTest(t)
mtc := startMultiTestContext(t, 5)
defer mtc.Stop()
store0 := mtc.stores[0]
// Split off a range from the initial range for testing; there are
// complications if the metadata ranges are removed from store 1, this
// simplifies the test.
splitKey := roachpb.RKey("m")
rightKey := roachpb.RKey("z")
{
replica := store0.LookupReplica(roachpb.RKeyMin, nil)
mtc.replicateRange(replica.Desc().RangeID, 0, 1, 2)
desc := replica.Desc()
splitArgs := adminSplitArgs(splitKey, splitKey)
if _, err := replica.AdminSplit(splitArgs, desc); err != nil {
t.Fatal(err)
}
}
// Replicate the new range to all five stores.
replica := store0.LookupReplica(rightKey, nil)
desc := replica.Desc()
mtc.replicateRange(desc.RangeID, 0, 3, 4)
// Initialize the gossip network.
var wg sync.WaitGroup
wg.Add(len(mtc.stores))
key := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ []byte) { wg.Done() })
for _, s := range mtc.stores {
s.GossipStore()
}
wg.Wait()
// storeIDset is used to compare the replica sets from different views (i.e.
// local range descriptors).
type storeIDset map[roachpb.StoreID]struct{}
makeStoreIDset := func(replicas []roachpb.ReplicaDescriptor) storeIDset {
idSet := make(storeIDset)
for _, r := range replicas {
idSet[r.StoreID] = struct{}{}
}
return idSet
}
// Function to see if the replication level of the new range has reached the
// expected equilibrium. If equilibrium has not been reached, this function
// returns the list of stores that *should* have a replica for the range.
checkReplication := func() (bool, storeIDset) {
// Query each store for a replica of the range, generating a real map of
// the replicas.
foundIDset := make(storeIDset)
foundLocalRangeDescs := make([]*roachpb.RangeDescriptor, 0, len(mtc.stores))
for _, s := range mtc.stores {
r := s.LookupReplica(splitKey, nil)
if r != nil {
foundLocalRangeDescs = append(foundLocalRangeDescs, r.Desc())
foundIDset[s.StoreID()] = struct{}{}
}
}
// Fail immediately if there are fewer than three replicas.
replicaCount := len(foundIDset)
if replicaCount < 3 {
t.Fatalf("Removed too many replicas; expected at least three replicas, found %d", replicaCount)
}
// Look up the official range descriptor, make sure it agrees with the
// found replicas.
realRangeDesc := getRangeMetadata(rightKey, mtc, t)
realIDset := makeStoreIDset(realRangeDesc.Replicas)
if !reflect.DeepEqual(realIDset, foundIDset) {
return false, realIDset
}
// Ensure the local range descriptors everywhere agree with reality.
for _, desc := range foundLocalRangeDescs {
localIDset := makeStoreIDset(desc.Replicas)
if !reflect.DeepEqual(localIDset, foundIDset) {
return false, realIDset
}
}
// If we have only three replicas, exit the loop.
if replicaCount == 3 {
return true, nil
}
return false, foundIDset
}
maxTimeout := time.After(10 * time.Second)
succeeded := false
for !succeeded {
select {
case <-maxTimeout:
t.Fatalf("Failed to achieve proper replication within 10 seconds")
//......... (remainder of code omitted) .........
Author: nporsche | Project: cockroach | Lines: 101 | Source: client_raft_test.go
Example 17: Example_rebalancing
func Example_rebalancing() {
stopper := stop.NewStopper()
defer stopper.Stop()
// Model a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
rpcContext := rpc.NewContext(&base.Context{Insecure: true}, nil, stopper)
server := rpc.NewServer(rpcContext) // never started
g := gossip.New(context.Background(), rpcContext, server, nil, stopper, metric.NewRegistry())
// Have to call g.SetNodeID before calling g.AddInfo.
g.SetNodeID(roachpb.NodeID(1))
sp := NewStorePool(
g,
hlc.NewClock(hlc.UnixNano),
nil,
/* reservationsEnabled */ true,
TestTimeUntilStoreDeadOff,
stopper,
)
alloc := MakeAllocator(sp, AllocatorOptions{AllowRebalance: true, Deterministic: true})
var wg sync.WaitGroup
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ roachpb.Value) { wg.Done() })
const generations = 100
const nodes = 20
// Initialize testStores.
var testStores [nodes]testStore
for i := 0; i < len(testStores); i++ {
testStores[i].StoreID = roachpb.StoreID(i)
testStores[i].Node = roachpb.NodeDescriptor{NodeID: roachpb.NodeID(i)}
testStores[i].Capacity = roachpb.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
}
// Initialize the cluster with a single range.
testStores[0].add(alloc.randGen.Int63n(1 << 20))
for i := 0; i < generations; i++ {
// First loop through test stores and add data.
wg.Add(len(testStores))
for j := 0; j < len(testStores); j++ {
// Add a pretend range to the testStore if there's already one.
if testStores[j].Capacity.RangeCount > 0 {
testStores[j].add(alloc.randGen.Int63n(1 << 20))
}
if err := g.AddInfoProto(gossip.MakeStoreKey(roachpb.StoreID(j)), &testStores[j].StoreDescriptor, 0); err != nil {
panic(err)
}
}
wg.Wait()
// Next loop through test stores and maybe rebalance.
for j := 0; j < len(testStores); j++ {
ts := &testStores[j]
target := alloc.RebalanceTarget(
roachpb.Attributes{},
[]roachpb.ReplicaDescriptor{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}},
-1)
if target != nil {
testStores[j].rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
}
}
// Output store capacities as 3-digit decimal values scaled to the largest store (000-999).
if i%(generations/50) == 0 {
var maxBytes int64
for j := 0; j < len(testStores); j++ {
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
if bytes > maxBytes {
maxBytes = bytes
}
}
if maxBytes > 0 {
for j := 0; j < len(testStores); j++ {
endStr := " "
if j == len(testStores)-1 {
endStr = ""
}
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
}
fmt.Printf("\n")
}
}
}
var totBytes int64
var totRanges int32
for i := 0; i < len(testStores); i++ {
totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
totRanges += testStores[i].Capacity.RangeCount
}
fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
//.........这里部分代码省略.........
开发者ID:yaojingguo,项目名称:cockroach,代码行数:101,代码来源:allocator_test.go
示例18: TestStoreRangeDownReplicate
// TestStoreRangeDownReplicate verifies that the replication queue will notice
// over-replicated ranges and remove replicas from them.
func TestStoreRangeDownReplicate(t *testing.T) {
defer leaktest.AfterTest(t)
mtc := startMultiTestContext(t, 5)
defer mtc.Stop()
store0 := mtc.stores[0]
// Split off a range from the initial range for testing; there are
// complications if the metadata ranges are removed from store 1, this
// simplifies the test.
splitKey := proto.Key("m")
rightKey := proto.Key("z")
{
replica := store0.LookupReplica(proto.KeyMin, nil)
mtc.replicateRange(replica.Desc().RangeID, 0, 1, 2)
desc := replica.Desc()
splitArgs := adminSplitArgs(splitKey, splitKey, desc.RangeID, store0.StoreID())
if _, err := replica.AdminSplit(splitArgs, desc); err != nil {
t.Fatal(err)
}
}
// Replicate the new range to all five stores.
replica := store0.LookupReplica(rightKey, nil)
desc := replica.Desc()
mtc.replicateRange(desc.RangeID, 0, 3, 4)
// Initialize the gossip network.
var wg sync.WaitGroup
wg.Add(len(mtc.stores))
key := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ []byte) { wg.Done() })
for _, s := range mtc.stores {
s.GossipStore()
}
wg.Wait()
// storeIDset is used to compare the replica sets from different views (i.e.
// local range descriptors).
type storeIDset map[proto.StoreID]struct{}
makeStoreIDset := func(replicas []proto.Replica) storeIDset {
idSet := make(storeIDset)
for _, r := range replicas {
idSet[r.StoreID] = struct{}{}
}
return idSet
}
// Function to get the current range descriptor for the target range.
getRangeMetadata := func() proto.RangeDescriptor {
// Calls to RangeLookup typically use inconsistent reads, but we
// want to do a consistent read here. This is important when we are
// considering one of the metadata ranges: we must not do an
// inconsistent lookup in our own copy of the range.
reply := proto.RangeLookupResponse{}
b := &client.Batch{}
b.InternalAddCall(proto.Call{
Args: &proto.RangeLookupRequest{
RequestHeader: proto.RequestHeader{
Key: keys.RangeMetaKey(rightKey),
},
MaxRanges: 1,
},
Reply: &reply,
})
if err := mtc.db.Run(b); err != nil {
t.Fatalf("error getting range metadata: %s", err.Error())
}
if len(reply.Ranges) != 1 {
t.Fatalf("expected 1 range descriptor, go %d", len(reply.Ranges))
}
return reply.Ranges[0]
}
// Function to see if the replication level of the new range has reached the
// expected equilibrium. If equilibrium has not been reached, this function
// returns the list of stores that *should* have a replica for the range.
checkReplication := func() (bool, storeIDset) {
// Query each store for a replica of the range, generating a real map of
// the replicas.
foundIDset := make(storeIDset)
foundLocalRangeDescs := make([]*proto.RangeDescriptor, 0, len(mtc.stores))
for _, s := range mtc.stores {
r := s.LookupReplica(splitKey, nil)
if r != nil {
foundLocalRangeDescs = append(foundLocalRangeDescs, r.Desc())
foundIDset[s.StoreID()] = struct{}{}
}
}
// Fail immediately if there are fewer than three replicas.
replicaCount := len(foundIDset)
if replicaCount < 3 {
t.Fatalf("Removed too many replicas; expected at least three replicas, found %d", replicaCount)
}
// Look up the official range descriptor, make sure it agrees with the
//......... (remainder of code omitted) .........
Author: kangxinrong | Project: cockroach | Lines: 101 | Source: client_raft_test.go
Example 19: Example_rebalancing
func Example_rebalancing() {
// Model a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
g := gossip.New(nil, 0, nil)
stopper := stop.NewStopper()
defer stopper.Stop()
sp := NewStorePool(g, TestTimeUntilStoreDeadOff, stopper)
alloc := MakeAllocator(sp, RebalancingOptions{AllowRebalance: true, Deterministic: true})
alloc.randGen = rand.New(rand.NewSource(0))
var wg sync.WaitGroup
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ []byte) { wg.Done() })
const generations = 100
const nodes = 20
// Initialize testStores.
var testStores [nodes]testStore
for i := 0; i < len(testStores); i++ {
testStores[i].StoreID = roachpb.StoreID(i)
testStores[i].Node = roachpb.NodeDescriptor{NodeID: roachpb.NodeID(i)}
testStores[i].Capacity = roachpb.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
}
// Initialize the cluster with a single range.
testStores[0].add(alloc.randGen.Int63n(1 << 20))
for i := 0; i < generations; i++ {
// First loop through test stores and add data.
wg.Add(len(testStores))
for j := 0; j < len(testStores); j++ {
// Add a pretend range to the testStore if there's already one.
if testStores[j].Capacity.RangeCount > 0 {
testStores[j].add(alloc.randGen.Int63n(1 << 20))
}
key := gossip.MakeStoreKey(roachpb.StoreID(j))
if err := g.AddInfoProto(key, &testStores[j].StoreDescriptor, 0); err != nil {
panic(err)
}
}
wg.Wait()
// Next loop through test stores and maybe rebalance.
for j := 0; j < len(testStores); j++ {
ts := &testStores[j]
if alloc.ShouldRebalance(ts.StoreID) {
target := alloc.RebalanceTarget(ts.StoreID, roachpb.Attributes{}, []roachpb.ReplicaDescriptor{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
if target != nil {
testStores[j].rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
}
}
}
// Output store capacities as 3-digit decimal values scaled to the largest store (000-999).
if i%(generations/50) == 0 {
var maxBytes int64
for j := 0; j < len(testStores); j++ {
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
if bytes > maxBytes {
maxBytes = bytes
}
}
if maxBytes > 0 {
for j := 0; j < len(testStores); j++ {
endStr := " "
if j == len(testStores)-1 {
endStr = ""
}
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
}
fmt.Printf("\n")
}
}
}
var totBytes int64
var totRanges int32
for i := 0; i < len(testStores); i++ {
totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
totRanges += testStores[i].Capacity.RangeCount
}
fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 138 000 000 000 000 000 000 999 000 000 000 000 000 000 000 000 000 000 000 000
// 922 319 000 000 000 239 000 999 000 000 000 000 000 214 073 000 000 000 190 000
// 999 505 480 000 634 352 421 644 212 331 396 144 000 242 419 275 000 000 727 028
// 999 678 908 705 350 558 549 714 651 824 895 694 000 373 610 490 372 106 492 796
// 932 701 763 999 660 706 571 702 787 945 848 678 062 692 762 413 603 252 513 882
// 937 656 875 984 734 717 676 685 910 895 847 841 349 754 864 463 722 377 655 999
// 885 701 805 999 647 744 802 659 778 834 830 725 569 761 922 587 684 458 693 935
// 813 650 709 931 583 733 843 619 793 881 768 658 565 713 956 598 733 594 656 999
// 873 727 721 999 544 812 848 666 817 943 831 658 556 769 927 554 799 733 670 869
// 937 765 827 999 543 875 907 670 929 997 913 768 621 853 922 618 878 832 733 937
// 902 819 744 988 547 904 922 688 879 999 812 710 554 789 890 591 808 865 658 932
// 870 873 846 997 596 937 899 765 864 969 855 751 577 824 951 579 858 908 653 999
// 880 833 856 999 640 918 932 774 920 930 869 739 686 784 853 553 885 941 685 986
// 874 797 808 999 645 925 928 781 920 956 859 762 678 761 819 627 899 941 725 959
// 886 801 835 999 638 984 927 825 968 958 860 760 813 716 800 638 908 908 798 945
// 860 840 836 973 634 999 944 834 977 923 848 769 846 728 836 605 865 915 781 896
//......... (remainder of code omitted) .........
Author: nporsche | Project: cockroach | Lines: 101 | Source: allocator_test.go
Example 20: TestStoreRangeDownReplicate
// TestStoreRangeDownReplicate verifies that the replication queue will notice
// over-replicated ranges and remove replicas from them.
func TestStoreRangeDownReplicate(t *testing.T) {
defer leaktest.AfterTest(t)
mtc := startMultiTestContext(t, 5)
defer mtc.Stop()
store
//......... (remainder of code omitted) .........