This page collects typical usage examples of the Golang type github.com/cockroachdb/cockroach/pkg/acceptance/cluster.Cluster. If you have been wondering what the Cluster type is for and how to use it, the curated examples below should help.
Twenty code examples of the Cluster type are shown below, sorted by popularity by default.
Example 1: postFreeze
func postFreeze(
	c cluster.Cluster, freeze bool, timeout time.Duration,
) (serverpb.ClusterFreezeResponse, error) {
	httpClient := cluster.HTTPClient
	httpClient.Timeout = timeout

	var resp serverpb.ClusterFreezeResponse
	log.Infof(context.Background(), "requesting: freeze=%t, timeout=%s", freeze, timeout)
	cb := func(v proto.Message) {
		oldNum := resp.RangesAffected
		resp = *v.(*serverpb.ClusterFreezeResponse)
		if oldNum > resp.RangesAffected {
			resp.RangesAffected = oldNum
		}
		if (resp != serverpb.ClusterFreezeResponse{}) {
			log.Infof(context.Background(), "%+v", &resp)
		}
	}
	err := httputil.StreamJSON(
		httpClient,
		c.URL(0)+"/_admin/v1/cluster/freeze",
		&serverpb.ClusterFreezeRequest{Freeze: freeze},
		&serverpb.ClusterFreezeResponse{},
		cb,
	)
	return resp, err
}
Author: knz | Project: cockroach | Lines: 27 | Source: freeze_test.go
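A minimal call-site sketch (hypothetical; `c` is the test's cluster.Cluster, and the one-minute timeout is an assumption, not taken from the original test):

	// Freeze the cluster, report progress, then thaw it again.
	resp, err := postFreeze(c, true, time.Minute)
	if err != nil {
		t.Fatal(err)
	}
	log.Infof(context.Background(), "froze %d ranges", resp.RangesAffected)
	if _, err := postFreeze(c, false, time.Minute); err != nil {
		t.Fatal(err)
	}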
Example 2: cutNetwork
func cutNetwork(t *testing.T, c cluster.Cluster, closer <-chan struct{}, partitions ...[]int) {
	defer func() {
		if errs := restoreNetwork(t, c); len(errs) > 0 {
			t.Fatalf("errors restoring the network: %+v", errs)
		}
	}()
	addrs, addrsToNode := mustGetHosts(t, c)
	ipPartitions := make([][]iptables.IP, 0, len(partitions))
	for _, partition := range partitions {
		ipPartition := make([]iptables.IP, 0, len(partition))
		for _, nodeIndex := range partition {
			ipPartition = append(ipPartition, addrs[nodeIndex])
		}
		ipPartitions = append(ipPartitions, ipPartition)
	}
	log.Warningf(context.TODO(), "partitioning: %v (%v)", partitions, ipPartitions)
	for host, cmds := range iptables.Rules(iptables.Bidirectional(ipPartitions...)) {
		for _, cmd := range cmds {
			if err := c.ExecRoot(addrsToNode[host], cmd); err != nil {
				t.Fatal(err)
			}
		}
	}
	<-closer
	log.Warningf(context.TODO(), "resolved all partitions")
}
Author: knz | Project: cockroach | Lines: 26 | Source: partition.go
Example 3: testBuildInfoInner
func testBuildInfoInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	checkGossip(t, c, 20*time.Second, hasPeers(c.NumNodes()))

	var details serverpb.DetailsResponse
	util.SucceedsSoon(t, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		default:
		}
		return httputil.GetJSON(cluster.HTTPClient, c.URL(0)+"/_status/details/local", &details)
	})

	bi := details.BuildInfo
	testData := map[string]string{
		"go_version":   bi.GoVersion,
		"tag":          bi.Tag,
		"time":         bi.Time,
		"dependencies": bi.Dependencies,
	}
	for key, val := range testData {
		if val == "" {
			t.Errorf("build info not set for \"%s\"", key)
		}
	}
}
Author: knz | Project: cockroach | Lines: 26 | Source: build_info_test.go
Example 4: CheckGossip
// CheckGossip fetches the gossip infoStore from each node and invokes the given
// function. The test passes if the function returns nil for every node,
// retrying for up to the given duration.
func CheckGossip(
	ctx context.Context, t testing.TB, c cluster.Cluster, d time.Duration, f CheckGossipFunc,
) {
	err := util.RetryForDuration(d, func() error {
		select {
		case <-stopper.ShouldStop():
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		var infoStatus gossip.InfoStatus
		for i := 0; i < c.NumNodes(); i++ {
			if err := httputil.GetJSON(cluster.HTTPClient, c.URL(ctx, i)+"/_status/gossip/local", &infoStatus); err != nil {
				return errors.Wrapf(err, "failed to get gossip status from node %d", i)
			}
			if err := f(infoStatus.Infos); err != nil {
				return errors.Errorf("node %d: %s", i, err)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(errors.Errorf("condition failed to evaluate within %s: %s", d, err))
	}
}
Author: BramGruneir | Project: cockroach | Lines: 30 | Source: util.go
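The `CheckGossipFunc` passed in is a predicate over one node's gossip info map. The `HasPeers` helper used at the call sites above (e.g. `CheckGossip(ctx, t, c, waitTime, HasPeers(num))` in Example 11) plausibly looks like the sketch below; the "node:" key prefix and the error wording are assumptions reconstructed from usage, not the project's verbatim code:

	// CheckGossipFunc inspects one node's gossip infos and returns nil once
	// the desired condition holds.
	type CheckGossipFunc func(map[string]gossip.Info) error

	// HasPeers returns a CheckGossipFunc that succeeds when the expected
	// number of node descriptors is present in gossip. (Sketch; the gossip
	// key prefix for node descriptors is assumed.)
	func HasPeers(expected int) CheckGossipFunc {
		return func(infos map[string]gossip.Info) error {
			count := 0
			for k := range infos {
				if strings.HasPrefix(k, "node:") {
					count++
				}
			}
			if count != expected {
				return errors.Errorf("found %d node gossip entries, expected %d", count, expected)
			}
			return nil
		}
	}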
Example 5: checkGossip
// checkGossip fetches the gossip infoStore from each node and invokes the given
// function. The test passes if the function returns nil for every node,
// retrying for up to the given duration.
func checkGossip(t *testing.T, c cluster.Cluster, d time.Duration, f checkGossipFunc) {
	err := util.RetryForDuration(d, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		var infoStatus gossip.InfoStatus
		for i := 0; i < c.NumNodes(); i++ {
			if err := httputil.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/gossip/local", &infoStatus); err != nil {
				return err
			}
			if err := f(infoStatus.Infos); err != nil {
				return errors.Errorf("node %d: %s", i, err)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(errors.Errorf("condition failed to evaluate within %s: %s", d, err))
	}
}
Author: knz | Project: cockroach | Lines: 28 | Source: gossip_peerings_test.go
Example 6: mustGetHosts
func mustGetHosts(t *testing.T, c cluster.Cluster) ([]iptables.IP, map[iptables.IP]int) {
	var addrs []iptables.IP
	addrsToNode := make(map[iptables.IP]int)
	for i := 0; i < c.NumNodes(); i++ {
		addr := iptables.IP(c.InternalIP(i).String())
		addrsToNode[addr] = i
		addrs = append(addrs, addr)
	}
	return addrs, addrsToNode
}
Author: knz | Project: cockroach | Lines: 10 | Source: partition.go
Example 7: restoreNetwork
func restoreNetwork(t *testing.T, c cluster.Cluster) []error {
	var errs []error
	for i := 0; i < c.NumNodes(); i++ {
		for _, cmd := range iptables.Reset() {
			if err := c.ExecRoot(i, cmd); err != nil {
				errs = append(errs, err)
			}
		}
	}
	return errs
}
Author: knz | Project: cockroach | Lines: 11 | Source: partition.go
Example 8: testPutInner
func testPutInner(ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	db, err := c.NewClient(ctx, 0)
	if err != nil {
		t.Fatal(err)
	}

	errs := make(chan error, c.NumNodes())
	start := timeutil.Now()
	deadline := start.Add(cfg.Duration)
	var count int64
	for i := 0; i < c.NumNodes(); i++ {
		go func() {
			r, _ := randutil.NewPseudoRand()
			value := randutil.RandBytes(r, 8192)

			for timeutil.Now().Before(deadline) {
				k := atomic.AddInt64(&count, 1)
				v := value[:r.Intn(len(value))]
				if err := db.Put(ctx, fmt.Sprintf("%08d", k), v); err != nil {
					errs <- err
					return
				}
			}
			errs <- nil
		}()
	}

	for i := 0; i < c.NumNodes(); {
		baseCount := atomic.LoadInt64(&count)
		select {
		case <-stopper.ShouldStop():
			t.Fatalf("interrupted")
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
			i++
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is still
			// running.
			loadedCount := atomic.LoadInt64(&count)
			log.Infof(ctx, "%d (%d/s)", loadedCount, loadedCount-baseCount)
			c.Assert(ctx, t)
			if err := cluster.Consistent(ctx, c, 0); err != nil {
				t.Fatal(err)
			}
		}
	}

	elapsed := timeutil.Since(start)
	log.Infof(ctx, "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Author: BramGruneir | Project: cockroach | Lines: 52 | Source: put_test.go
Example 9: testClusterRecoveryInner
func testClusterRecoveryInner(
	ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
	num := c.NumNodes()

	// One client for each node.
	initBank(t, c.PGUrl(ctx, 0))

	start := timeutil.Now()
	state := testState{
		t:        t,
		errChan:  make(chan error, num),
		teardown: make(chan struct{}),
		deadline: start.Add(cfg.Duration),
		clients:  make([]testClient, num),
	}

	for i := 0; i < num; i++ {
		state.clients[i].Lock()
		state.initClient(ctx, t, c, i)
		state.clients[i].Unlock()
		go transferMoneyLoop(ctx, i, &state, *numAccounts, *maxTransfer)
	}

	defer func() {
		<-state.teardown
	}()

	// Chaos monkey.
	rnd, seed := randutil.NewPseudoRand()
	log.Warningf(ctx, "monkey starts (seed %d)", seed)
	pickNodes := func() []int {
		return rnd.Perm(num)[:rnd.Intn(num)+1]
	}
	go chaosMonkey(ctx, &state, c, true, pickNodes, 0)

	waitClientsStop(ctx, num, &state, stall)

	// Verify accounts.
	verifyAccounts(t, &state.clients[0])

	elapsed := timeutil.Since(start)
	var count uint64
	counts := state.counts()
	for _, c := range counts {
		count += c
	}
	log.Infof(ctx, "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Author: BramGruneir | Project: cockroach | Lines: 49 | Source: chaos_test.go
Example 10: testRepairInner
func testRepairInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	testStopper := stop.NewStopper()
	dc := newDynamicClient(c, testStopper)
	testStopper.AddCloser(dc)
	defer testStopper.Stop()

	// Add some loads.
	for i := 0; i < c.NumNodes()*2; i++ {
		ID := i
		testStopper.RunWorker(func() {
			insertLoad(t, dc, ID)
		})
	}

	// TODO(bram): #5345 add repair mechanism.

	select {
	case <-stopper:
	case <-time.After(cfg.Duration):
	}
}
Author: knz | Project: cockroach | Lines: 21 | Source: repair_test.go
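`newDynamicClient` and `insertLoad` live elsewhere in repair_test.go and are not shown on this page. A hypothetical sketch of a load worker in the same spirit — the `dc.exec` method name and the table schema are assumptions, not the project's actual API:

	// insertLoad keeps inserting rows through the dynamic client until an
	// error (typically the cluster or stopper shutting down) ends the worker.
	// (Hypothetical sketch; dc.exec and the schema are assumed.)
	func insertLoad(t *testing.T, dc *dynamicClient, ID int) {
		if _, err := dc.exec(`CREATE DATABASE IF NOT EXISTS testload`); err != nil {
			t.Errorf("load %d: %s", ID, err)
			return
		}
		tableName := fmt.Sprintf("testload.t%d", ID)
		if _, err := dc.exec(fmt.Sprintf(
			`CREATE TABLE IF NOT EXISTS %s (k INT PRIMARY KEY, v INT)`, tableName)); err != nil {
			t.Errorf("load %d: %s", ID, err)
			return
		}
		for k := 0; ; k++ {
			if _, err := dc.exec(fmt.Sprintf(
				`INSERT INTO %s VALUES (%d, %d)`, tableName, k, k)); err != nil {
				t.Logf("load %d stopping: %s", ID, err)
				return
			}
		}
	}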
Example 11: testGossipPeeringsInner
func testGossipPeeringsInner(
	ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
	num := c.NumNodes()

	deadline := timeutil.Now().Add(cfg.Duration)

	waitTime := longWaitTime
	if cfg.Duration < waitTime {
		waitTime = shortWaitTime
	}

	for timeutil.Now().Before(deadline) {
		CheckGossip(ctx, t, c, waitTime, HasPeers(num))

		// Restart the first node.
		log.Infof(ctx, "restarting node 0")
		if err := c.Restart(ctx, 0); err != nil {
			t.Fatal(err)
		}
		CheckGossip(ctx, t, c, waitTime, HasPeers(num))

		// Restart another node (if there is one).
		var pickedNode int
		if num > 1 {
			pickedNode = rand.Intn(num-1) + 1
		}
		log.Infof(ctx, "restarting node %d", pickedNode)
		if err := c.Restart(ctx, pickedNode); err != nil {
			t.Fatal(err)
		}
		CheckGossip(ctx, t, c, waitTime, HasPeers(num))
	}
}
Author: BramGruneir | Project: cockroach | Lines: 34 | Source: gossip_peerings_test.go
Example 12: BidirectionalPartitionNemesis
// BidirectionalPartitionNemesis is a nemesis which randomly severs the network
// symmetrically between two random groups of nodes. Partitioned and connected
// mode take alternating turns, with random durations of up to 15s.
func BidirectionalPartitionNemesis(t *testing.T, stop <-chan struct{}, c cluster.Cluster) {
	randSec := func() time.Duration { return time.Duration(rand.Int63n(15 * int64(time.Second))) }
	log.Infof(context.Background(), "cleaning up any previous rules")
	_ = restoreNetwork(t, c) // clean up any potential leftovers
	log.Infof(context.Background(), "starting partition nemesis")
	for {
		ch := make(chan struct{})
		go func() {
			select {
			case <-time.After(randSec()):
			case <-stop:
			}
			close(ch)
		}()
		cutNetwork(t, c, ch, randomBidirectionalPartition(c.NumNodes())...)
		select {
		case <-stop:
			return
		case <-time.After(randSec()):
		}
	}
}
Author: knz | Project: cockroach | Lines: 25 | Source: partition.go
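`randomBidirectionalPartition` is defined elsewhere in partition.go. A plausible reconstruction consistent with its use above — splitting a random permutation of the node indices into two disjoint groups at a random cut — is sketched here; the exact cut logic is an assumption:

	// randomBidirectionalPartition splits the node indices [0, numNodes) into
	// two random, disjoint, non-empty groups. (Hypothetical sketch of the
	// unshown helper; requires numNodes >= 2.)
	func randomBidirectionalPartition(numNodes int) [][]int {
		perm := rand.Perm(numNodes)
		cut := rand.Intn(numNodes-1) + 1 // keep both sides non-empty
		return [][]int{perm[:cut], perm[cut:]}
	}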
Example 13: checkRangeReplication
func checkRangeReplication(t *testing.T, c cluster.Cluster, d time.Duration) {
	if c.NumNodes() < 1 {
		// Looks silly, but we actually start zero-node clusters in the
		// reference tests.
		t.Log("replication test is a no-op for empty cluster")
		return
	}

	wantedReplicas := 3
	if c.NumNodes() < 3 {
		wantedReplicas = c.NumNodes()
	}

	log.Infof(context.Background(), "waiting for first range to have %d replicas", wantedReplicas)

	util.SucceedsSoon(t, func() error {
		// Reconnect on every iteration; gRPC will eagerly tank the connection
		// on transport errors. Always talk to node 0 because it's guaranteed
		// to exist.
		client, dbStopper := c.NewClient(t, 0)
		defer dbStopper.Stop()

		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		foundReplicas, err := countRangeReplicas(client)
		if err != nil {
			return err
		}

		if log.V(1) {
			log.Infof(context.Background(), "found %d replicas", foundReplicas)
		}
		if foundReplicas >= wantedReplicas {
			return nil
		}
		return fmt.Errorf("expected %d replicas, only found %d", wantedReplicas, foundReplicas)
	})

	log.Infof(context.Background(), "found %d replicas", wantedReplicas)
}
Author: knz | Project: cockroach | Lines: 45 | Source: replication_test.go
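`countRangeReplicas` is not shown on this page; conceptually it reads the first range's descriptor and counts the replicas listed there. A sketch under that assumption (the key helper and GetProto call follow the client API of this era, but are not the project's verbatim code):

	// countRangeReplicas returns the replica count recorded in the descriptor
	// of the first range. (Sketch; exact key construction is assumed.)
	func countRangeReplicas(db *client.DB) (int, error) {
		var desc roachpb.RangeDescriptor
		if err := db.GetProto(context.TODO(), keys.RangeDescriptorKey(roachpb.RKeyMin), &desc); err != nil {
			return 0, err
		}
		return len(desc.Replicas), nil
	}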
Example 14: testNodeRestartInner
func testNodeRestartInner(
	ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
	num := c.NumNodes()
	if minNum := 3; num < minNum {
		t.Skipf("need at least %d nodes, got %d", minNum, num)
	}

	// One client for each node.
	initBank(t, c.PGUrl(ctx, 0))

	start := timeutil.Now()
	state := testState{
		t:        t,
		errChan:  make(chan error, 1),
		teardown: make(chan struct{}),
		deadline: start.Add(cfg.Duration),
		clients:  make([]testClient, 1),
	}

	clientIdx := num - 1
	client := &state.clients[0]
	client.Lock()
	client.db = makePGClient(t, c.PGUrl(ctx, clientIdx))
	client.Unlock()
	go transferMoneyLoop(ctx, 0, &state, *numAccounts, *maxTransfer)

	defer func() {
		<-state.teardown
	}()

	// Chaos monkey.
	rnd, seed := randutil.NewPseudoRand()
	log.Warningf(ctx, "monkey starts (seed %d)", seed)
	pickNodes := func() []int {
		return []int{rnd.Intn(clientIdx)}
	}
	go chaosMonkey(ctx, &state, c, false, pickNodes, clientIdx)

	waitClientsStop(ctx, 1, &state, stall)

	// Verify accounts.
	verifyAccounts(t, client)

	elapsed := timeutil.Since(start)
	count := atomic.LoadUint64(&client.count)
	log.Infof(ctx, "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Author: BramGruneir | Project: cockroach | Lines: 48 | Source: chaos_test.go
Example 15: testAdminLossOfQuorumInner
func testAdminLossOfQuorumInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	if c.NumNodes() < 2 {
		t.Logf("skipping test %s because given cluster has too few nodes", cfg.Name)
		return
	}

	// Get the ids for each node.
	nodeIDs := make([]roachpb.NodeID, c.NumNodes())
	for i := 0; i < c.NumNodes(); i++ {
		var details serverpb.DetailsResponse
		if err := httputil.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/details/local", &details); err != nil {
			t.Fatal(err)
		}
		nodeIDs[i] = details.NodeID
	}

	// Leave only the first node alive.
	for i := 1; i < c.NumNodes(); i++ {
		if err := c.Kill(i); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve node statuses.
	var nodes serverpb.NodesResponse
	if err := httputil.GetJSON(cluster.HTTPClient, c.URL(0)+"/_status/nodes", &nodes); err != nil {
		t.Fatal(err)
	}
	for _, nodeID := range nodeIDs {
		var nodeStatus status.NodeStatus
		if err := httputil.GetJSON(cluster.HTTPClient, c.URL(0)+"/_status/nodes/"+strconv.Itoa(int(nodeID)), &nodeStatus); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve time-series data.
	nowNanos := timeutil.Now().UnixNano()
	queryRequest := tspb.TimeSeriesQueryRequest{
		StartNanos: nowNanos - 10*time.Second.Nanoseconds(),
		EndNanos:   nowNanos,
		Queries: []tspb.Query{
			{Name: "doesn't_matter", Sources: []string{}},
		},
	}
	var queryResponse tspb.TimeSeriesQueryResponse
	if err := httputil.PostJSON(cluster.HTTPClient, c.URL(0)+"/ts/query",
		&queryRequest, &queryResponse); err != nil {
		t.Fatal(err)
	}

	// TODO(cdo): When we're able to issue SQL queries without a quorum, test all
	// admin endpoints that issue SQL queries here.
}
Author: knz | Project: cockroach | Lines: 54 | Source: admin_test.go
Example 16: initClient
// initClient initializes the client talking to node "i".
// It requires that the caller hold the client's write lock.
func (state *testState) initClient(t *testing.T, c cluster.Cluster, i int) {
	state.clients[i].db = makePGClient(t, c.PGUrl(i))
}
Author: knz | Project: cockroach | Lines: 5 | Source: chaos_test.go
Example 17: initClient
// initClient initializes the client talking to node "i".
// It requires that the caller hold the client's write lock.
func (state *testState) initClient(ctx context.Context, t *testing.T, c cluster.Cluster, i int) {
	state.clients[i].db = makePGClient(t, c.PGUrl(ctx, i))
}
Author: BramGruneir | Project: cockroach | Lines: 5 | Source: chaos_test.go
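Both variants delegate to `makePGClient`, which is not reproduced here. In the acceptance suite it is essentially a thin wrapper over database/sql; a sketch, assuming `gosql "database/sql"` (as aliased in Example 20) and a registered "postgres" driver such as lib/pq:

	// makePGClient opens a SQL connection to the given PostgreSQL URL and
	// fails the test immediately on error. (Sketch of the unshown helper.)
	func makePGClient(t *testing.T, dest string) *gosql.DB {
		db, err := gosql.Open("postgres", dest)
		if err != nil {
			t.Fatal(err)
		}
		return db
	}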
Example 18: chaosMonkey
// chaosMonkey picks a set of nodes and restarts them. If stopClients is set
// all the clients are locked before the nodes are restarted.
func chaosMonkey(
	ctx context.Context,
	state *testState,
	c cluster.Cluster,
	stopClients bool,
	pickNodes func() []int,
	consistentIdx int,
) {
	defer close(state.teardown)
	for curRound := uint64(1); !state.done(); curRound++ {
		atomic.StoreUint64(&state.monkeyIteration, curRound)
		select {
		case <-stopper.ShouldStop():
			return
		default:
		}

		// Pick nodes to be restarted.
		nodes := pickNodes()

		if stopClients {
			// Prevent all clients from writing while nodes are being restarted.
			for i := 0; i < len(state.clients); i++ {
				state.clients[i].Lock()
			}
		}
		log.Infof(ctx, "round %d: restarting nodes %v", curRound, nodes)
		for _, i := range nodes {
			// Two early exit conditions. Note that the break in the select below
			// only exits the select, not this loop; the state.done() check that
			// follows is what actually cuts the restarts short.
			select {
			case <-stopper.ShouldStop():
				break
			default:
			}
			if state.done() {
				break
			}
			log.Infof(ctx, "round %d: restarting %d", curRound, i)
			if err := c.Kill(ctx, i); err != nil {
				state.t.Error(err)
			}
			if err := c.Restart(ctx, i); err != nil {
				state.t.Error(err)
			}
			if stopClients {
				// Reinitialize the client talking to the restarted node.
				state.initClient(ctx, state.t, c, i)
			}
		}
		if stopClients {
			for i := 0; i < len(state.clients); i++ {
				state.clients[i].Unlock()
			}
		}

		preCount := state.counts()

		madeProgress := func() bool {
			newCounts := state.counts()
			for i := range newCounts {
				if newCounts[i] > preCount[i] {
					return true
				}
			}
			return false
		}

		// Sleep until at least one client is writing successfully.
		log.Warningf(ctx, "round %d: monkey sleeping while cluster recovers...", curRound)
		for !state.done() && !madeProgress() {
			time.Sleep(time.Second)
		}
		c.Assert(ctx, state.t)
		if err := cluster.Consistent(ctx, c, consistentIdx); err != nil {
			state.t.Error(err)
		}
		log.Warningf(ctx, "round %d: cluster recovered", curRound)
	}
}
Author: BramGruneir | Project: cockroach | Lines: 82 | Source: chaos_test.go
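`chaosMonkey` relies on a shared `testState` that this page does not show. A minimal sketch consistent with the fields and methods used across Examples 9, 14, and 18 (`done`, `counts`, `monkeyIteration`, per-client locking); anything beyond the referenced fields is an assumption:

	// testState is shared between the chaos monkey and the client goroutines.
	// (Sketch reconstructed from usage; the real definition is in chaos_test.go.)
	type testState struct {
		t               *testing.T
		errChan         chan error
		teardown        chan struct{}
		deadline        time.Time
		clients         []testClient
		monkeyIteration uint64
	}

	// testClient pairs a SQL connection with an operation counter; the
	// embedded mutex is what the Lock/Unlock calls above refer to. (Sketch.)
	type testClient struct {
		sync.RWMutex
		db    *gosql.DB
		count uint64
	}

	// done reports whether the test deadline has passed. (The real method may
	// also check for stalled clients.)
	func (state *testState) done() bool {
		return !timeutil.Now().Before(state.deadline)
	}

	// counts snapshots every client's operation counter.
	func (state *testState) counts() []uint64 {
		counts := make([]uint64, len(state.clients))
		for i := range state.clients {
			counts[i] = atomic.LoadUint64(&state.clients[i].count)
		}
		return counts
	}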
Example 19: testSingleKeyInner
func testSingleKeyInner(
	ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
	num := c.NumNodes()

	// Initialize the value for our test key to zero.
	const key = "test-key"
	initDB := c.NewClient(ctx, t, 0)
	if err := initDB.Put(ctx, key, 0); err != nil {
		t.Fatal(err)
	}

	type result struct {
		err        error
		maxLatency time.Duration
	}

	resultCh := make(chan result, num)
	deadline := timeutil.Now().Add(cfg.Duration)
	var expected int64

	// Start up num workers each reading and writing the same
	// key. Each worker is configured to talk to a different node in the
	// cluster.
	for i := 0; i < num; i++ {
		db := c.NewClient(ctx, t, i)
		go func() {
			var r result
			for timeutil.Now().Before(deadline) {
				start := timeutil.Now()
				err := db.Txn(ctx, func(txn *client.Txn) error {
					minExp := atomic.LoadInt64(&expected)
					r, err := txn.Get(key)
					if err != nil {
						return err
					}
					b := txn.NewBatch()
					v := r.ValueInt()
					b.Put(key, v+1)
					err = txn.CommitInBatch(b)
					// Atomic updates after the fact mean that we should read
					// exp or larger (since concurrent writers might have
					// committed but not yet performed their atomic update).
					if err == nil && v < minExp {
						return errors.Errorf("unexpected read: %d, expected >= %d", v, minExp)
					}
					return err
				})
				if err != nil {
					resultCh <- result{err: err}
					return
				}
				atomic.AddInt64(&expected, 1)
				latency := timeutil.Since(start)
				if r.maxLatency < latency {
					r.maxLatency = latency
				}
			}
			resultCh <- r
		}()
	}

	// Verify that none of the workers encountered an error.
	var results []result
	for len(results) < num {
		select {
		case <-stopper.ShouldStop():
			t.Fatalf("interrupted")
		case r := <-resultCh:
			if r.err != nil {
				t.Fatal(r.err)
			}
			results = append(results, r)
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is still
			// running.
			log.Infof(ctx, "%d", atomic.LoadInt64(&expected))
		}
	}

	// Verify the resulting value stored at the key is what we expect.
	r, err := initDB.Get(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	v := r.ValueInt()
	if expected != v {
		t.Fatalf("expected %d, but found %d", expected, v)
	}
	var maxLatency []time.Duration
	for _, r := range results {
		maxLatency = append(maxLatency, r.maxLatency)
	}
	log.Infof(ctx, "%d increments: %s", v, maxLatency)
}
Author: jmptrader | Project: cockroach | Lines: 95 | Source: single_key_test.go
Example 20: testEventLogInner
func testEventLogInner(
	ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
	num := c.NumNodes()
	if num <= 0 {
		t.Fatalf("%d nodes in cluster", num)
	}

	var confirmedClusterID uuid.UUID
	type nodeEventInfo struct {
		Descriptor roachpb.NodeDescriptor
		ClusterID  uuid.UUID
	}

	// Verify that a node_join message was logged for each node in the cluster.
	// We expect there to eventually be one such message for each node in the
	// cluster, and each message must be correctly formatted.
	util.SucceedsSoon(t, func() error {
		db := makePGClient(t, c.PGUrl(ctx, 0))
		defer db.Close()

		// Query all node join events. There should be one for each node in the
		// cluster.
		rows, err := db.Query(
			"SELECT targetID, info FROM system.eventlog WHERE eventType = $1",
			string(csql.EventLogNodeJoin))
		if err != nil {
			return err
		}
		seenIds := make(map[int64]struct{})
		var clusterID uuid.UUID
		for rows.Next() {
			var targetID int64
			var infoStr gosql.NullString
			if err := rows.Scan(&targetID, &infoStr); err != nil {
				t.Fatal(err)
			}

			// Verify the stored node descriptor.
			if !infoStr.Valid {
				t.Fatalf("info not recorded for node join, target node %d", targetID)
			}
			var info nodeEventInfo
			if err := json.Unmarshal([]byte(infoStr.String), &info); err != nil {
				t.Fatal(err)
			}
			if a, e := int64(info.Descriptor.NodeID), targetID; a != e {
				t.Fatalf("Node join with targetID %d had descriptor for wrong node %d", e, a)
			}

			// Verify cluster ID is recorded, and is the same for all nodes.
			if (info.ClusterID == uuid.UUID{}) {
				t.Fatalf("Node join recorded nil cluster id, info: %v", info)
			}
			if (clusterID == uuid.UUID{}) {
				clusterID = info.ClusterID
			} else if clusterID != info.ClusterID {
				t.Fatalf(
					"Node join recorded different cluster ID than earlier node. Expected %s, got %s. Info: %v",
					clusterID, info.ClusterID, info)
			}

			// Verify that all NodeIDs are different.
			if _, ok := seenIds[targetID]; ok {
				t.Fatalf("Node ID %d seen in two different node join messages", targetID)
			}
			seenIds[targetID] = struct{}{}
		}
		if err := rows.Err(); err != nil {
			return err
		}

		if a, e := len(seenIds), c.NumNodes(); a != e {
			return errors.Errorf("expected %d node join messages, found %d: %v", e, a, seenIds)
		}

		confirmedClusterID = clusterID
		return nil
	})

	// Stop and Start Node 0, and verify the node restart message.
	if err := c.Kill(ctx, 0); err != nil {
		t.Fatal(err)
	}
	if err := c.Restart(ctx, 0); err != nil {
		t.Fatal(err)
	}

	util.SucceedsSoon(t, func() error {
		db := makePGClient(t, c.PGUrl(ctx, 0))
		defer db.Close()

		// Query all node restart events. There should only be one.
		rows, err := db.Query(
			"SELECT targetID, info FROM system.eventlog WHERE eventType = $1",
			string(csql.EventLogNodeRestart))
		if err != nil {
			return err
		}
//......... the remainder of this example is elided .........
Author: jmptrader | Project: cockroach | Lines: 101 | Source: event_log_test.go
Note: the github.com/cockroachdb/cockroach/pkg/acceptance/cluster.Cluster examples on this page are aggregated from GitHub/MSDocs and other source-code and documentation hosting platforms. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's license before redistributing or reusing the code. Do not republish without permission.