This article collects typical usage examples of the Go function timeutil.Since from github.com/cockroachdb/cockroach/pkg/util/timeutil. If you have been wondering what Since does, how to use it, and what real-world calls look like, the hand-picked examples below should help.
The following presents 20 code examples of the Since function, drawn from real projects and listed roughly by popularity.
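All twenty examples build on the same idiom: record a start time with timeutil.Now(), do some work, and turn the elapsed wall-clock time into a time.Duration with timeutil.Since(start). As used throughout these examples, timeutil.Now and timeutil.Since mirror the standard library's time.Now and time.Since, so the following minimal, runnable sketch uses the standard time package; inside a CockroachDB source tree you would substitute timeutil. The 50 ms sleep is just a stand-in for real work.

package main

import (
    "fmt"
    "time"
)

func main() {
    // Record the start of the operation being timed.
    start := time.Now() // timeutil.Now() in CockroachDB code

    // Stand-in for the work being measured.
    time.Sleep(50 * time.Millisecond)

    // Since returns the elapsed time as a time.Duration.
    elapsed := time.Since(start) // timeutil.Since(start) in CockroachDB code

    // A Duration prints human-readably and converts to raw units,
    // e.g. for feeding a metrics counter as in Example 1 below.
    fmt.Printf("operation took %s (%d ns)\n", elapsed, elapsed.Nanoseconds())
}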
Example 1: processReplica
// processReplica processes a single replica. This should not be
// called externally to the queue. bq.mu.Lock must not be held
// while calling this method.
func (bq *baseQueue) processReplica(
    queueCtx context.Context, repl *Replica, clock *hlc.Clock,
) error {
    bq.processMu.Lock()
    defer bq.processMu.Unlock()

    // Load the system config.
    cfg, ok := bq.gossip.GetSystemConfig()
    if !ok {
        log.VEventf(queueCtx, 1, "no system config available, skipping")
        return nil
    }

    if bq.requiresSplit(cfg, repl) {
        // Range needs to be split due to zone configs, but queue does
        // not accept unsplit ranges.
        log.VEventf(queueCtx, 3, "%s: split needed; skipping", repl)
        return nil
    }

    // Putting a span in a context means that events will no longer go to the
    // event log. Use queueCtx for events that are intended for the event log.
    ctx, span := bq.AnnotateCtxWithSpan(queueCtx, bq.name)
    defer span.Finish()
    // Also add the Replica annotations to ctx.
    ctx = repl.AnnotateCtx(ctx)
    ctx, cancel := context.WithTimeout(ctx, bq.processTimeout)
    defer cancel()
    log.Eventf(ctx, "processing replica")

    // If the queue requires a replica to have the range lease in
    // order to be processed, check whether this replica has range lease
    // and renew or acquire if necessary.
    if bq.needsLease {
        // Create a "fake" get request in order to invoke redirectOnOrAcquireLease.
        if err := repl.redirectOnOrAcquireLease(ctx); err != nil {
            if _, harmless := err.GetDetail().(*roachpb.NotLeaseHolderError); harmless {
                log.VEventf(queueCtx, 3, "not holding lease; skipping")
                return nil
            }
            return errors.Wrapf(err.GoError(), "%s: could not obtain lease", repl)
        }
        log.Event(ctx, "got range lease")
    }

    log.VEventf(queueCtx, 3, "processing")
    start := timeutil.Now()
    err := bq.impl.process(ctx, clock.Now(), repl, cfg)
    duration := timeutil.Since(start)
    bq.processingNanos.Inc(duration.Nanoseconds())
    if err != nil {
        return err
    }
    log.VEventf(queueCtx, 2, "done: %s", duration)
    log.Event(ctx, "done")
    bq.successes.Inc(1)
    return nil
}
Developer: knz, Project: cockroach, Lines: 61, Source: queue.go
Example 2: testDecimalSingleArgFunc
func testDecimalSingleArgFunc(
    t *testing.T,
    f func(*inf.Dec, *inf.Dec, inf.Scale) (*inf.Dec, error),
    s inf.Scale,
    tests []decimalOneArgTestCase,
) {
    for _, tc := range tests {
        t.Run(fmt.Sprintf("%s = %s", tc.input, tc.expected), func(t *testing.T) {
            x, exp := new(inf.Dec), new(inf.Dec)
            x.SetString(tc.input)
            exp.SetString(tc.expected)

            // Test allocated return value.
            var z *inf.Dec
            var err error
            done := make(chan struct{}, 1)
            start := timeutil.Now()
            go func() {
                z, err = f(nil, x, s)
                done <- struct{}{}
            }()
            var after <-chan time.Time
            if *flagDurationLimit > 0 {
                after = time.After(*flagDurationLimit)
            }
            select {
            case <-done:
                t.Logf("execute duration: %s", timeutil.Since(start))
            case <-after:
                t.Fatalf("timed out after %s", *flagDurationLimit)
            }
            if err != nil {
                if tc.expected != err.Error() {
                    t.Errorf("expected error %s, got %s", tc.expected, err)
                }
                return
            }
            if exp.Cmp(z) != 0 {
                t.Errorf("expected %s, got %s", exp, z)
            }

            // Test provided decimal mutation.
            z.SetString("0.0")
            _, _ = f(z, x, s)
            if exp.Cmp(z) != 0 {
                t.Errorf("expected %s, got %s", exp, z)
            }

            // Test same arg mutation.
            _, _ = f(x, x, s)
            if exp.Cmp(x) != 0 {
                t.Errorf("expected %s, got %s", exp, x)
            }
            x.SetString(tc.input)
        })
    }
}
Developer: BramGruneir, Project: cockroach, Lines: 57, Source: decimal_test.go
Example 3: testPutInner
func testPutInner(ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
    db, err := c.NewClient(ctx, 0)
    if err != nil {
        t.Fatal(err)
    }

    errs := make(chan error, c.NumNodes())
    start := timeutil.Now()
    deadline := start.Add(cfg.Duration)
    var count int64
    for i := 0; i < c.NumNodes(); i++ {
        go func() {
            r, _ := randutil.NewPseudoRand()
            value := randutil.RandBytes(r, 8192)
            for timeutil.Now().Before(deadline) {
                k := atomic.AddInt64(&count, 1)
                v := value[:r.Intn(len(value))]
                if err := db.Put(ctx, fmt.Sprintf("%08d", k), v); err != nil {
                    errs <- err
                    return
                }
            }
            errs <- nil
        }()
    }

    for i := 0; i < c.NumNodes(); {
        baseCount := atomic.LoadInt64(&count)
        select {
        case <-stopper.ShouldStop():
            t.Fatalf("interrupted")
        case err := <-errs:
            if err != nil {
                t.Fatal(err)
            }
            i++
        case <-time.After(1 * time.Second):
            // Periodically print out progress so that we know the test is still
            // running.
            loadedCount := atomic.LoadInt64(&count)
            log.Infof(ctx, "%d (%d/s)", loadedCount, loadedCount-baseCount)
            c.Assert(ctx, t)
            if err := cluster.Consistent(ctx, c, 0); err != nil {
                t.Fatal(err)
            }
        }
    }

    elapsed := timeutil.Since(start)
    log.Infof(ctx, "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Developer: BramGruneir, Project: cockroach, Lines: 52, Source: put_test.go
Example 4: testClusterRecoveryInner
func testClusterRecoveryInner(
    ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
    num := c.NumNodes()

    // One client for each node.
    initBank(t, c.PGUrl(ctx, 0))

    start := timeutil.Now()
    state := testState{
        t:        t,
        errChan:  make(chan error, num),
        teardown: make(chan struct{}),
        deadline: start.Add(cfg.Duration),
        clients:  make([]testClient, num),
    }

    for i := 0; i < num; i++ {
        state.clients[i].Lock()
        state.initClient(ctx, t, c, i)
        state.clients[i].Unlock()
        go transferMoneyLoop(ctx, i, &state, *numAccounts, *maxTransfer)
    }

    defer func() {
        <-state.teardown
    }()

    // Chaos monkey.
    rnd, seed := randutil.NewPseudoRand()
    log.Warningf(ctx, "monkey starts (seed %d)", seed)
    pickNodes := func() []int {
        return rnd.Perm(num)[:rnd.Intn(num)+1]
    }
    go chaosMonkey(ctx, &state, c, true, pickNodes, 0)

    waitClientsStop(ctx, num, &state, stall)

    // Verify accounts.
    verifyAccounts(t, &state.clients[0])

    elapsed := timeutil.Since(start)
    var count uint64
    counts := state.counts()
    for _, c := range counts {
        count += c
    }
    log.Infof(ctx, "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Developer: BramGruneir, Project: cockroach, Lines: 49, Source: chaos_test.go
Example 5: TestSucceedsSoon
func TestSucceedsSoon(t *testing.T) {
    // Try a method which always succeeds.
    SucceedsSoon(t, func() error { return nil })

    // Try a method which succeeds after a known duration.
    start := timeutil.Now()
    duration := time.Millisecond * 10
    SucceedsSoon(t, func() error {
        elapsed := timeutil.Since(start)
        if elapsed > duration {
            return nil
        }
        return errors.Errorf("%s elapsed, waiting until %s elapses", elapsed, duration)
    })
}
Developer: knz, Project: cockroach, Lines: 15, Source: testing_test.go
Example 6: waitForFullReplication
// waitForFullReplication waits for the cluster to be fully replicated.
func (c *Cluster) waitForFullReplication() {
    for i := 1; true; i++ {
        done, detail := c.isReplicated()
        if (done && i >= 50) || (i%50) == 0 {
            fmt.Print(detail)
            log.Infof(context.Background(), "waiting for replication")
        }
        if done {
            break
        }
        time.Sleep(100 * time.Millisecond)
    }
    log.Infof(context.Background(), "replicated %.3fs", timeutil.Since(c.started).Seconds())
}
Developer: knz, Project: cockroach, Lines: 16, Source: localcluster.go
Example 7: clientStatus
func (g *Gossip) clientStatus() string {
    var buf bytes.Buffer

    g.mu.Lock()
    defer g.mu.Unlock()
    g.clientsMu.Lock()
    defer g.clientsMu.Unlock()

    fmt.Fprintf(&buf, "gossip client (%d/%d cur/max conns)\n", len(g.clientsMu.clients), g.outgoing.maxSize)
    for _, c := range g.clientsMu.clients {
        fmt.Fprintf(&buf, "  %d: %s (%s: %s)\n",
            c.peerID, c.addr, roundSecs(timeutil.Since(c.createdAt)), c.clientMetrics)
    }
    return buf.String()
}
Developer: knz, Project: cockroach, Lines: 15, Source: gossip.go
Example 8: testNodeRestartInner
func testNodeRestartInner(
    ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
    num := c.NumNodes()
    if minNum := 3; num < minNum {
        t.Skipf("need at least %d nodes, got %d", minNum, num)
    }

    // One client for each node.
    initBank(t, c.PGUrl(ctx, 0))

    start := timeutil.Now()
    state := testState{
        t:        t,
        errChan:  make(chan error, 1),
        teardown: make(chan struct{}),
        deadline: start.Add(cfg.Duration),
        clients:  make([]testClient, 1),
    }

    clientIdx := num - 1
    client := &state.clients[0]
    client.Lock()
    client.db = makePGClient(t, c.PGUrl(ctx, clientIdx))
    client.Unlock()
    go transferMoneyLoop(ctx, 0, &state, *numAccounts, *maxTransfer)

    defer func() {
        <-state.teardown
    }()

    // Chaos monkey.
    rnd, seed := randutil.NewPseudoRand()
    log.Warningf(ctx, "monkey starts (seed %d)", seed)
    pickNodes := func() []int {
        return []int{rnd.Intn(clientIdx)}
    }
    go chaosMonkey(ctx, &state, c, false, pickNodes, clientIdx)

    waitClientsStop(ctx, 1, &state, stall)

    // Verify accounts.
    verifyAccounts(t, client)

    elapsed := timeutil.Since(start)
    count := atomic.LoadUint64(&client.count)
    log.Infof(ctx, "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Developer: BramGruneir, Project: cockroach, Lines: 48, Source: chaos_test.go
Example 9: finishSQLTxn
// finishSQLTxn closes the root span for the current SQL txn.
// This needs to be called before resetForNewSQLTransaction() is called for
// starting another SQL txn.
// The session context is just used for logging the SQL trace.
func (ts *txnState) finishSQLTxn(sessionCtx context.Context) {
    ts.mon.Stop(ts.Ctx)
    if ts.sp == nil {
        panic("No span in context? Was resetForNewSQLTxn() called previously?")
    }
    sampledFor7881 := (ts.sp.BaggageItem(keyFor7881Sample) != "")
    ts.sp.Finish()
    ts.sp = nil
    if (traceSQL && timeutil.Since(ts.sqlTimestamp) >= traceSQLDuration) ||
        (traceSQLFor7881 && sampledFor7881) {
        dump := tracing.FormatRawSpans(ts.CollectedSpans)
        if len(dump) > 0 {
            log.Infof(sessionCtx, "SQL trace:\n%s", dump)
        }
    }
}
Developer: hvaara, Project: cockroach, Lines: 20, Source: session.go
Example 10: scanLoop
// scanLoop loops endlessly, scanning through replicas available via
// the replica set, or until the scanner is stopped. The iteration
// is paced to complete a full scan in approximately the scan interval.
func (rs *replicaScanner) scanLoop(clock *hlc.Clock, stopper *stop.Stopper) {
    stopper.RunWorker(func() {
        ctx := rs.AnnotateCtx(context.Background())
        start := timeutil.Now()

        // waitTimer is reset in each call to waitAndProcess.
        defer rs.waitTimer.Stop()

        for {
            if rs.GetDisabled() {
                if done := rs.waitEnabled(stopper); done {
                    return
                }
                continue
            }
            var shouldStop bool
            count := 0
            rs.replicas.Visit(func(repl *Replica) bool {
                count++
                shouldStop = rs.waitAndProcess(ctx, start, clock, stopper, repl)
                return !shouldStop
            })
            if count == 0 {
                // No replicas processed, just wait.
                shouldStop = rs.waitAndProcess(ctx, start, clock, stopper, nil)
            }

            shouldStop = shouldStop || nil != stopper.RunTask(func() {
                // Increment iteration count.
                rs.mu.Lock()
                defer rs.mu.Unlock()
                rs.mu.scanCount++
                rs.mu.total += timeutil.Since(start)
                if log.V(6) {
                    log.Infof(ctx, "reset replica scan iteration")
                }

                // Reset iteration and start time.
                start = timeutil.Now()
            })
            if shouldStop {
                return
            }
        }
    })
}
Developer: knz, Project: cockroach, Lines: 49, Source: scanner.go
Example 11: Start
// Start starts a cluster. The numWorkers parameter controls the SQL connection
// settings to avoid unnecessary connection creation. The args parameter can be
// used to pass extra arguments to each node.
func (c *Cluster) Start(db string, numWorkers int, args, env []string) {
    c.started = timeutil.Now()

    baseCtx := &base.Config{
        User:     security.NodeUser,
        Insecure: true,
    }
    c.rpcCtx = rpc.NewContext(log.AmbientContext{}, baseCtx, nil, c.stopper)

    for i := range c.Nodes {
        c.Nodes[i] = c.makeNode(i, args, env)
        c.Clients[i] = c.makeClient(i)
        c.Status[i] = c.makeStatus(i)
        c.DB[i] = c.makeDB(i, numWorkers, db)
    }

    log.Infof(context.Background(), "started %.3fs", timeutil.Since(c.started).Seconds())
    c.waitForFullReplication()
}
Developer: knz, Project: cockroach, Lines: 22, Source: localcluster.go
Example 12: PeriodicallyCheckForUpdates
// PeriodicallyCheckForUpdates starts a background worker that periodically
// phones home to check for updates and report usage.
func (s *Server) PeriodicallyCheckForUpdates() {
    s.stopper.RunWorker(func() {
        startup := timeutil.Now()

        for {
            // `maybeCheckForUpdates` and `maybeReportUsage` both return the
            // duration until they should next be checked.
            // Wait for the shorter of the durations returned by the two checks.
            wait := s.maybeCheckForUpdates()
            if reportWait := s.maybeReportUsage(timeutil.Since(startup)); reportWait < wait {
                wait = reportWait
            }

            jitter := rand.Intn(updateCheckJitterSeconds) - updateCheckJitterSeconds/2
            wait = wait + (time.Duration(jitter) * time.Second)

            select {
            case <-s.stopper.ShouldQuiesce():
                return
            case <-time.After(wait):
            }
        }
    })
}
Developer: knz, Project: cockroach, Lines: 24, Source: updates.go
Example 13: TestConcurrentBatch
func TestConcurrentBatch(t *testing.T) {
    defer leaktest.AfterTest(t)()

    dir, err := ioutil.TempDir("", "TestConcurrentBatch")
    if err != nil {
        t.Fatal(err)
    }
    defer func() {
        if err := os.RemoveAll(dir); err != nil {
            t.Fatal(err)
        }
    }()

    db, err := NewRocksDB(roachpb.Attributes{}, dir, RocksDBCache{},
        0, DefaultMaxOpenFiles)
    if err != nil {
        t.Fatalf("could not create new rocksdb db instance at %s: %v", dir, err)
    }
    defer db.Close()

    // Prepare 16 4 MB batches containing non-overlapping contents.
    var batches []Batch
    for i := 0; i < 16; i++ {
        batch := db.NewBatch()
        for j := 0; true; j++ {
            key := encoding.EncodeUvarintAscending([]byte("bar"), uint64(i))
            key = encoding.EncodeUvarintAscending(key, uint64(j))
            if err := batch.Put(MakeMVCCMetadataKey(key), nil); err != nil {
                t.Fatal(err)
            }
            if len(batch.Repr()) >= 4<<20 {
                break
            }
        }
        batches = append(batches, batch)
    }

    errChan := make(chan error, len(batches))

    // Concurrently write all the batches.
    for _, batch := range batches {
        go func(batch Batch) {
            errChan <- batch.Commit()
        }(batch)
    }

    // While the batch writes are in progress, try to write another key.
    time.Sleep(100 * time.Millisecond)
    remainingBatches := len(batches)
    for i := 0; remainingBatches > 0; i++ {
        select {
        case err := <-errChan:
            if err != nil {
                t.Fatal(err)
            }
            remainingBatches--
        default:
        }

        // This write can get delayed excessively if we hit the max memtable count
        // or the L0 stop writes threshold.
        start := timeutil.Now()
        key := encoding.EncodeUvarintAscending([]byte("foo"), uint64(i))
        if err := db.Put(MakeMVCCMetadataKey(key), nil); err != nil {
            t.Fatal(err)
        }
        if elapsed := timeutil.Since(start); elapsed >= 10*time.Second {
            t.Fatalf("write took %0.1fs\n", elapsed.Seconds())
        }
    }
}
Developer: hvaara, Project: cockroach, Lines: 71, Source: rocksdb_test.go
Example 14: grpcTransportFactory
// grpcTransportFactory during race builds wraps the implementation and
// intercepts all BatchRequests, reading them in a tight loop. This allows the
// race detector to catch any mutations of a batch passed to the transport.
func grpcTransportFactory(
    opts SendOptions, rpcContext *rpc.Context, replicas ReplicaSlice, args roachpb.BatchRequest,
) (Transport, error) {
    if atomic.AddInt32(&running, 1) <= 1 {
        rpcContext.Stopper.RunWorker(func() {
            var iters int
            var curIdx int
            defer func() {
                atomic.StoreInt32(&running, 0)
                log.Infof(
                    context.TODO(),
                    "transport race promotion: ran %d iterations on up to %d requests",
                    iters, curIdx+1,
                )
            }()

            // Make a fixed-size slice of *BatchRequest. When full, entries
            // are evicted in FIFO order.
            const size = 1000
            bas := make([]*roachpb.BatchRequest, size)
            encoder := gob.NewEncoder(ioutil.Discard)
            for {
                iters++
                start := timeutil.Now()
                for _, ba := range bas {
                    if ba != nil {
                        if err := encoder.Encode(ba); err != nil {
                            panic(err)
                        }
                    }
                }
                // Prevent the goroutine from spinning too hot as this lets CI
                // times skyrocket. Sleep on average for as long as we worked
                // on the last iteration so we spend no more than half our CPU
                // time on this task.
                jittered := time.After(jitter(timeutil.Since(start)))
                // Collect incoming requests until the jittered timer fires,
                // then access everything we have.
                for {
                    select {
                    case <-rpcContext.Stopper.ShouldStop():
                        return
                    case ba := <-incoming:
                        bas[curIdx%size] = ba
                        curIdx++
                        continue
                    case <-jittered:
                    }
                    break
                }
            }
        })
    }
    select {
    // We have a shallow copy here and so the top level scalar fields can't
    // really race, but making more copies doesn't make anything more
    // transparent, so from now on we operate on a pointer.
    case incoming <- &args:
    default:
        // Avoid slowing down the tests if we're backed up.
    }
    return grpcTransportFactoryImpl(opts, rpcContext, replicas, args)
}
Developer: knz, Project: cockroach, Lines: 65, Source: transport_race.go
Example 15: TestSessionFinishRollsBackTxn
// ... (the beginning of this test is omitted) ...
            if connClosed {
                return
            }
            if err := conn.Close(); err != nil {
                t.Fatal(err)
            }
        }()

        txn, err := conn.Begin()
        if err != nil {
            t.Fatal(err)
        }
        tx := txn.(driver.Execer)
        if _, err := tx.Exec("SET TRANSACTION PRIORITY NORMAL", nil); err != nil {
            t.Fatal(err)
        }
        if state == sql.RestartWait || state == sql.CommitWait {
            if _, err := tx.Exec("SAVEPOINT cockroach_restart", nil); err != nil {
                t.Fatal(err)
            }
        }

        insertStmt := "INSERT INTO t.test(k, v) VALUES (1, 'a')"
        if state == sql.RestartWait {
            // To get a txn in RestartWait, we'll use an aborter.
            if err := aborter.QueueStmtForAbortion(
                insertStmt, 1 /* restartCount */, false /* willBeRetriedIbid */); err != nil {
                t.Fatal(err)
            }
        }
        if _, err := tx.Exec(insertStmt, nil); err != nil {
            t.Fatal(err)
        }
        if err := aborter.VerifyAndClear(); err != nil {
            t.Fatal(err)
        }

        if state == sql.RestartWait || state == sql.CommitWait {
            _, err := tx.Exec("RELEASE SAVEPOINT cockroach_restart", nil)
            if state == sql.CommitWait {
                if err != nil {
                    t.Fatal(err)
                }
            } else if !testutils.IsError(err, "pq: restart transaction:.*") {
                t.Fatal(err)
            }
        }

        // Abruptly close the connection.
        connClosed = true
        if err := conn.Close(); err != nil {
            t.Fatal(err)
        }

        // Check that the txn we had above was rolled back. We do this by reading
        // after the preceding txn and checking that we don't get an error and
        // that we haven't been blocked by intents (we can't exactly test that we
        // haven't been blocked but we assert that the query didn't take too
        // long).
        // We do the read in an explicit txn so that automatic retries don't hide
        // any errors.
        // TODO(andrei): Figure out a better way to test for non-blocking.
        // Use a trace when the client-side tracing story gets good enough.
        txCheck, err := mainDB.Begin()
        if err != nil {
            t.Fatal(err)
        }
        // Run check at low priority so we don't push the previous transaction and
        // fool ourselves into thinking it had been rolled back.
        if _, err := txCheck.Exec("SET TRANSACTION PRIORITY LOW"); err != nil {
            t.Fatal(err)
        }
        ts := timeutil.Now()
        var count int
        if err := txCheck.QueryRow("SELECT COUNT(1) FROM t.test").Scan(&count); err != nil {
            t.Fatal(err)
        }
        // CommitWait actually committed, so we'll need to clean up.
        if state != sql.CommitWait {
            if count != 0 {
                t.Fatalf("expected no rows, got: %d", count)
            }
        } else {
            if _, err := txCheck.Exec("DELETE FROM t.test"); err != nil {
                t.Fatal(err)
            }
        }
        if err := txCheck.Commit(); err != nil {
            t.Fatal(err)
        }
        if d := timeutil.Since(ts); d > time.Second {
            t.Fatalf("Looks like the checking tx was unexpectedly blocked. "+
                "It took %s to commit.", d)
        }
    })
}
}
Developer: knz, Project: cockroach, Lines: 101, Source: session_test.go
Example 16: TestDumpRandom
// TestDumpRandom generates a random number of random rows with all data
// types. This data is dumped, inserted, and dumped again. The two dumps
// are compared for exactness. The data from the inserted dump is then
// SELECT'd and compared to the original generated data to ensure it is
// round-trippable.
func TestDumpRandom(t *testing.T) {
    defer leaktest.AfterTest(t)()

    c, err := newCLITest(t, false)
    if err != nil {
        t.Fatal(err)
    }
    defer c.stop(true)

    url, cleanup := sqlutils.PGUrl(t, c.ServingAddr(), "TestDumpRandom", url.User(security.RootUser))
    defer cleanup()

    conn := makeSQLConn(url.String())
    defer conn.Close()

    if err := conn.Exec(`
    CREATE DATABASE d;
    CREATE DATABASE o;
    CREATE TABLE d.t (
        rowid int,
        i int,
        f float,
        d date,
        m timestamp,
        n interval,
        o bool,
        e decimal,
        s string,
        b bytes,
        PRIMARY KEY (rowid, i, f, d, m, n, o, e, s, b)
    );
`, nil); err != nil {
        t.Fatal(err)
    }

    rnd, seed := randutil.NewPseudoRand()
    t.Logf("random seed: %v", seed)

    start := timeutil.Now()
    for iteration := 0; timeutil.Since(start) < *randomTestTime; iteration++ {
        if err := conn.Exec(`DELETE FROM d.t`, nil); err != nil {
            t.Fatal(err)
        }
        var generatedRows [][]driver.Value
        count := rnd.Int63n(500)
        t.Logf("random iteration %v: %v rows", iteration, count)
        for _i := int64(0); _i < count; _i++ {
            // Generate a random number of random inserts.
            i := rnd.Int63()
            f := rnd.Float64()
            d := time.Unix(0, rnd.Int63()).Round(time.Hour * 24).UTC()
            m := time.Unix(0, rnd.Int63()).Round(time.Microsecond).UTC()
            n := time.Duration(rnd.Int63()).String()
            o := rnd.Intn(2) == 1
            e := strings.TrimRight(inf.NewDec(rnd.Int63(), inf.Scale(rnd.Int31n(20)-10)).String(), ".0")
            sr := make([]byte, rnd.Intn(500))
            if _, err := rnd.Read(sr); err != nil {
                t.Fatal(err)
            }
            s := make([]byte, 0, len(sr))
            for _, b := range sr {
                r := rune(b)
                if !utf8.ValidRune(r) {
                    continue
                }
                s = append(s, []byte(string(r))...)
            }
            b := make([]byte, rnd.Intn(500))
            if _, err := rnd.Read(b); err != nil {
                t.Fatal(err)
            }
            vals := []driver.Value{
                _i,
                i,
                f,
                d,
                m,
                []byte(n), // intervals come out as `[]byte`s
                o,
                []byte(e), // decimals come out as `[]byte`s
                string(s),
                b,
            }
            if err := conn.Exec("INSERT INTO d.t VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)", vals); err != nil {
                t.Fatal(err)
            }
            generatedRows = append(generatedRows, vals[1:])
        }
        check := func(table string) {
            q := fmt.Sprintf("SELECT i, f, d, m, n, o, e, s, b FROM %s ORDER BY rowid", table)
            nrows, err := conn.Query(q, nil)
            if err != nil {
                // ... (the remainder of the code is omitted) ...
Developer: BramGruneir, Project: cockroach, Lines: 101, Source: dump_test.go
Example 17: Start
// ... (the beginning of this function is omitted) ...
                    }
                    // Attempt to unmarshal config into a table/database descriptor.
                    var descriptor sqlbase.Descriptor
                    if err := kv.Value.GetProto(&descriptor); err != nil {
                        log.Warningf(context.TODO(), "%s: unable to unmarshal descriptor %v", kv.Key, kv.Value)
                        continue
                    }
                    switch union := descriptor.Union.(type) {
                    case *sqlbase.Descriptor_Table:
                        table := union.Table
                        table.MaybeUpgradeFormatVersion()
                        if err := table.ValidateTable(); err != nil {
                            log.Errorf(context.TODO(), "%s: received invalid table descriptor: %v", kv.Key, table)
                            continue
                        }
                        // Keep track of outstanding schema changes.
                        // If all schema change commands always set UpVersion, why
                        // check for the presence of mutations?
                        // A schema change execution might fail soon after
                        // unsetting UpVersion, and we still want to process
                        // outstanding mutations. Similar with a table marked for deletion.
                        if table.UpVersion || table.Dropped() || table.Adding() ||
                            table.Renamed() || len(table.Mutations) > 0 {
                            if log.V(2) {
                                log.Infof(context.TODO(), "%s: queue up pending schema change; table: %d, version: %d",
                                    kv.Key, table.ID, table.Version)
                            }
                            // Only track the first schema change. We depend on
                            // gossip to renotify us when a schema change has been
                            // completed.
                            schemaChanger.tableID = table.ID
                            if len(table.Mutations) == 0 {
                                schemaChanger.mutationID = sqlbase.InvalidMutationID
                            } else {
                                schemaChanger.mutationID = table.Mutations[0].MutationID
                            }
                            schemaChanger.execAfter = execAfter
                            // Keep track of this schema change.
                            // Remove from oldSchemaChangers map.
                            delete(oldSchemaChangers, table.ID)
                            if sc, ok := s.schemaChangers[table.ID]; ok {
                                if sc.mutationID == schemaChanger.mutationID {
                                    // Ignore duplicate.
                                    continue
                                }
                            }
                            s.schemaChangers[table.ID] = schemaChanger
                        }
                    case *sqlbase.Descriptor_Database:
                        // Ignore.
                    }
                }
                // Delete old schema changers.
                for k := range oldSchemaChangers {
                    delete(s.schemaChangers, k)
                }
                timer = s.newTimer()
            case <-timer.C:
                if s.testingKnobs.AsyncExecNotification != nil &&
                    s.testingKnobs.AsyncExecNotification() != nil {
                    timer = s.newTimer()
                    continue
                }
                for tableID, sc := range s.schemaChangers {
                    if timeutil.Since(sc.execAfter) > 0 {
                        err := sc.exec()
                        if err != nil {
                            if err == errExistingSchemaChangeLease {
                            } else if err == sqlbase.ErrDescriptorNotFound {
                                // Someone deleted this table. Don't try to run the schema
                                // changer again. Note that there's no gossip update for the
                                // deletion which would remove this schemaChanger.
                                delete(s.schemaChangers, tableID)
                            } else {
                                // We don't need to act on integrity
                                // constraints violations because exec()
                                // purges mutations that violate integrity
                                // constraints.
                                log.Warningf(context.TODO(), "Error executing schema change: %s", err)
                            }
                        }
                        // Advance the execAfter time so that this schema
                        // changer doesn't get called again for a while.
                        sc.execAfter = timeutil.Now().Add(delay)
                    }
                    // Only attempt to run one schema changer.
                    break
                }
                timer = s.newTimer()
            case <-stopper.ShouldStop():
                return
            }
        }
    })
}
Developer: knz, Project: cockroach, Lines: 101, Source: schema_changer.go
Example 18: testDecimalDoubleArgFunc
func testDecimalDoubleArgFunc(
    t *testing.T,
    f func(*inf.Dec, *inf.Dec, *inf.Dec, inf.Scale) (*inf.Dec, error),
    s inf.Scale,
    tests []decimalTwoArgsTestCase,
) {
    for _, tc := range tests {
        t.Run(fmt.Sprintf("%s, %s = %s", tc.input1, tc.input2, tc.expected), func(t *testing.T) {
            x, y, exp := new(inf.Dec), new(inf.Dec), new(inf.Dec)
            if _, ok := x.SetString(tc.input1); !ok {
                t.Fatalf("could not set decimal: %s", tc.input1)
            }
            if _, ok := y.SetString(tc.input2); !ok {
                t.Fatalf("could not set decimal: %s", tc.input2)
            }

            // Test allocated return value.
            var z *inf.Dec
            var err error
            done := make(chan struct{}, 1)
            start := timeutil.Now()
            go func() {
                z, err = f(nil, x, y, s)
                done <- struct{}{}
            }()
            var after <-chan time.Time
            if *flagDurationLimit > 0 {
                after = time.After(*flagDurationLimit)
            }
            select {
            case <-done:
                t.Logf("execute duration: %s", timeutil.Since(start))
            case <-after:
                t.Fatalf("timed out after %s", *flagDurationLimit)
            }
            if err != nil {
                if tc.expected != err.Error() {
                    t.Errorf("expected error %s, got %s", tc.expected, err)
                }
                return
            }
            if z == nil {
                if tc.expected != "nil" {
                    t.Errorf("expected %s, got nil", tc.expected)
                }
                return
            } else if tc.expected == "nil" {
                t.Errorf("expected nil, got %s", z)
                return
            }
            if _, ok := exp.SetString(tc.expected); !ok {
                t.Errorf("could not set decimal: %s", tc.expected)
                return
            }
            if exp.Cmp(z) != 0 {
                t.Errorf("expected %s, got %s", exp, z)
            }

            // Test provided decimal mutation.
            z.SetString("0.0")
            _, _ = f(z, x, y, s)
            if exp.Cmp(z) != 0 {
                t.Errorf("expected %s, got %s", exp, z)
            }
            // Test first arg mutation.
            _, _ = f(x, x, y, s)
            if exp.Cmp(x) != 0 {
                t.Errorf("expected %s, got %s", exp, x)
            }
            x.SetString(tc.input1)
            // Test second arg mutation.
            _, _ = f(y, x, y, s)
            if exp.Cmp(y) != 0 {
                t.Errorf("expected %s, got %s", exp, y)
            }
            y.SetString(tc.input2)
            // Test both arg mutation, if possible.
            if tc.input1 == tc.input2 {
                _, _ = f(x, x, x, s)
                if exp.Cmp(x) != 0 {
                    t.Errorf("expected %s, got %s", exp, x)
                }
                x.SetString(tc.input1)
            }
        })
    }
}
Developer: BramGruneir, Project: cockroach, Lines: 90, Source: decimal_test.go
Example 19: Start
// ... (the beginning of this function is omitted) ...
    s.engines, err = s.cfg.CreateEngines()
    if err != nil {
        return errors.Wrap(err, "failed to create engines")
    }
    s.stopper.AddCloser(&s.engines)

    // We might have to sleep a bit to protect against this node producing non-
    // monotonic timestamps. Before restarting, its clock might have been driven
    // by other nodes' fast clocks, but when we restarted, we lost all this
    // information. For example, a client might have written a value at a
    // timestamp that's in the future of the restarted node's clock, and if we
    // don't do something, the same client's read would not return the written
    // value. So, we wait up to MaxOffset; we couldn't have served timestamps more
    // than MaxOffset in the future (assuming that MaxOffset was not changed, see
    // #9733).
    //
    // As an optimization for tests, we don't sleep if all the stores are brand
    // new. In this case, the node will not serve anything anyway until it
    // synchronizes with other nodes.
    {
        anyStoreBootstrapped := false
        for _, e := range s.engines {
            if _, err := storage.ReadStoreIdent(ctx, e); err != nil {
                // NotBootstrappedError is expected.
                if _, ok := err.(*storage.NotBootstrappedError); !ok {
                    return err
                }
            } else {
                anyStoreBootstrapped = true
                break
            }
        }
        if anyStoreBootstrapped {
            sleepDuration := s.clock.MaxOffset() - timeutil.Since(startTime)
            if sleepDuration > 0 {
                log.Infof(ctx, "sleeping for %s to guarantee HLC monotonicity", sleepDuration)
                time.Sleep(sleepDuration)
            }
        }
    }

    // Now that we have a monotonic HLC wrt previous incarnations of the process,
    // init all the replicas.
    err = s.node.start(
        ctx,
        unresolvedAdvertAddr,
        s.engines,
        s.cfg.NodeAttributes,
        s.cfg.Locality,
    )
    if err != nil {
        return err
    }
    log.Event(ctx, "started node")
    s.nodeLiveness.StartHeartbeat(ctx, s.stopper)

    // We can now add the node registry.
    s.recorder.AddNode(s.registry, s.node.Descriptor, s.node.startedAt)

    // Begin recording runtime statistics.
    s.startSampleEnvironment(s.cfg.MetricsSampleInterval)

    // Begin recording time series data collected by the status monitor.
    s.tsDB.PollSource(
        s.cfg.AmbientCtx, s.recorder, s.cfg.MetricsSampleInterval, ts.Resolution10s, s.stopper,
    // ... (the remainder of the code is omitted) ...
Developer: hvaara, Project: cockroach, Lines: 67, Source: server.go
Example 20: applySnapshot
// applySnapshot updates the replica based on the given snapshot and associated
// HardState (which may be empty, as Raft may apply some snapshots which don't
// require an update to the HardState). All snapshots must pass through Raft
// for correctness, i.e. the parameters to this method must be taken from
// a raft.Ready. It is the caller's responsibility to call
// r.store.processRangeDescriptorUpdate(r) after a successful applySnapshot.
func (r *Replica) applySnapshot(
    ctx context.Context, inSnap IncomingSnapshot, snap raftpb.Snapshot, hs raftpb.HardState,
) error {
    // Extract the updated range descriptor.
    desc := inSnap.RangeDescriptor

    r.mu.Lock()
    replicaID := r.mu.replicaID
    raftLogSize := r.mu.raftLogSize
    r.mu.Unlock()

    isPreemptive := replicaID == 0 // only used for accounting and log format

    var appliedSuccessfully bool
    defer func() {
        if appliedSuccessfully {
            if !isPreemptive {
                r.store.metrics.RangeSnapshotsNormalApplied.Inc(1)
            } else {
                r.store.metrics.RangeSnapshotsPreemptiveApplied.Inc(1)
            }
        }
    }()

    if raft.IsEmptySnap(snap) {
        // Raft discarded the snapshot, indicating that our local state is
        // already ahead of what the snapshot provides. But we count it for
        // stats (see the defer above).
        appliedSuccessfully = true
        return nil
    }

    snapType := "preemptive"
    if !isPreemptive {
        snapType = "Raft"
    }

    log.Infof(ctx, "applying %s snapshot at index %d "+
        "(id=%s, encoded size=%d, %d rocksdb batches, %d log entries)",
        snapType, snap.Metadata.Index, inSnap.SnapUUID.Short(),
        len(snap.Data), len(inSnap.Batches), len(inSnap.LogEntries))
    defer func(start time.Time) {
        log.Infof(ctx, "applied %s snapshot in %.3fs",
            snapType, timeutil.Since(start).Seconds())
    }(timeutil.Now())

    batch := r.store.Engine().NewBatch()
    defer batch.Close()

    // Clear the range using a distinct batch in order to prevent the iteration
    // from forcing the batch to flush from Go to C++.
    distinctBatch := batch.Distinct()

    // Delete everything in the range and recreate it from the snapshot.
    // We need to delete any old Raft log entries here because any log entries
    // that predate the snapshot will be orphaned and never truncated or GC'd.
    iter := NewReplicaDataIterator(&desc, distinctBatch, false /* !replicatedOnly */)
    defer iter.Close()
    for ; iter.Valid(); iter.Next() {
        if err := distinctBatch.Clear(iter.Key()); err != nil {
            return err
        }
    }
    distinctBatch.Close()

    // Write the snapshot into the range.
    for _, batchRepr := range inSnap.Batches {
        if err := batch.ApplyBatchRepr(batchRepr); err != nil {
            return err
        }
    }

    // The log entries are all written to distinct keys so we can use a
    // distinct batch.
    distinctBatch = batch.Distinct()
    logEntries := make([]raftpb.Entry, len(inSnap.LogEntries))
    for i, bytes := range inSnap.LogEntries {
        if err := logEntries[i].Unmarshal(bytes); err != nil {
            return err
        }
    }
    // Write the snapshot's Raft log into the range.
    _, raftLogSize, err := r.append(ctx, distinctBatch, 0, raftLogSize, logEntries)
    if err != nil {
        return err
    }
    if !raft.IsEmptyHardState(hs) {
        if err := setHardState(ctx, distinctBatch, r.RangeID, hs); err != nil {
            return errors.Wrapf(err, "unable to persist HardState %+v", &hs)
        }
    } else {
        // Note that we don't require that Raft supply us with a nonempty
        // ... (the remainder of the code is omitted) ...
Developer: bdarnell, Project: cockroach, Lines: 101, Source: replica_raftstorage.go
Note: the examples of the github.com/cockroachdb/cockroach/pkg/util/timeutil.Since function in this article are collected from open-source projects hosted on GitHub/MSDocs and similar code-management platforms. The snippets were contributed by the authors of the respective open-source projects, and the copyright belongs to the original authors; when redistributing or using them, please follow the corresponding project's license. Do not reproduce this article without permission.