This page collects typical usage examples of the BatchResponse type from the Go package github.com/cockroachdb/cockroach/roachpb. If you are wondering what BatchResponse is for, or how and where to use it, the curated examples below may help.
Shown below are 18 code examples of the BatchResponse type, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Go code examples.
Example 1: resendWithTxn
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
    // Run a one-off transaction with that single command.
    if log.V(1) {
        log.Infof("%s: auto-wrapping in txn and re-executing: ", ba)
    }
    tmpDB := client.NewDBWithPriority(tc, ba.UserPriority)
    var br *roachpb.BatchResponse
    pErr := tmpDB.Txn(func(txn *client.Txn) *roachpb.Error {
        txn.SetDebugName("auto-wrap", 0)
        b := txn.NewBatch()
        b.MaxScanResults = ba.MaxScanResults
        for _, arg := range ba.Requests {
            req := arg.GetInner()
            b.InternalAddRequest(req)
        }
        var pErr *roachpb.Error
        br, pErr = txn.CommitInBatchWithResponse(b)
        return pErr
    })
    if pErr != nil {
        return nil, pErr
    }
    br.Txn = nil // hide the evidence
    return br, nil
}
Author: petermattis | Project: cockroach | Lines: 29 | Source: txn_coord_sender.go
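
What makes this wrapper useful is that transaction restarts are absorbed inside tmpDB.Txn, so the caller never has to handle a retryable error itself. Below is a minimal stand-alone sketch of that retry-closure shape using only the standard library; Txn, retryTxn, and errRetry are hypothetical stand-ins, not the CockroachDB client API:

package main

import (
    "errors"
    "fmt"
)

// Txn is a stand-in for a client transaction handle; retryTxn is a
// hypothetical helper that re-runs fn until it either succeeds or fails
// with a non-retryable error, mirroring how tmpDB.Txn drives the closure.
type Txn struct{ attempts int }

var errRetry = errors.New("retryable txn error")

func retryTxn(fn func(*Txn) error) error {
    txn := &Txn{}
    for {
        err := fn(txn)
        if errors.Is(err, errRetry) {
            continue // transparent restart, invisible to the caller
        }
        return err
    }
}

func main() {
    err := retryTxn(func(txn *Txn) error {
        txn.attempts++
        if txn.attempts < 3 {
            return errRetry // simulate two restarts before success
        }
        return nil
    })
    fmt.Println("committed after restarts:", err == nil)
}
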
Example 2: executeCmd
// executeCmd interprets the given message as a *roachpb.BatchRequest and sends it
// via the local sender.
func (n *Node) executeCmd(argsI proto.Message) (proto.Message, error) {
    ba := argsI.(*roachpb.BatchRequest)
    var br *roachpb.BatchResponse
    f := func() {
        // TODO(tschottdorf) get a hold of the client's ID, add it to the
        // context before dispatching, and create an ID for tracing the request.
        sp := n.ctx.Tracer.StartSpan("node")
        defer sp.Finish()
        ctx, _ := opentracing.ContextWithSpan((*Node)(n).context(), sp)
        tStart := time.Now()
        var pErr *roachpb.Error
        br, pErr = n.stores.Send(ctx, *ba)
        if pErr != nil {
            br = &roachpb.BatchResponse{}
            sp.LogEvent(fmt.Sprintf("error: %T", pErr.GetDetail()))
        }
        if br.Error != nil {
            panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
        }
        n.feed.CallComplete(*ba, time.Now().Sub(tStart), pErr)
        br.Error = pErr
    }
    if !n.stopper.RunTask(f) {
        return nil, util.Errorf("node %d stopped", n.Descriptor.NodeID)
    }
    return br, nil
}
Author: guanqun | Project: cockroach | Lines: 32 | Source: node.go
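
The batch runs inside a closure handed to stopper.RunTask so that no new work starts once the node is shutting down. A minimal stand-in for that gate (not the cockroach util/stop API, just the pattern) looks like this:

package main

import (
    "fmt"
    "sync"
)

// stopper is a toy version of a task gate: RunTask runs f only while the
// stopper is still open, so requests are rejected during shutdown.
type stopper struct {
    mu      sync.Mutex
    stopped bool
}

func (s *stopper) RunTask(f func()) bool {
    s.mu.Lock()
    defer s.mu.Unlock()
    if s.stopped {
        return false // mirrors the "node stopped" error path above
    }
    f()
    return true
}

func (s *stopper) Stop() {
    s.mu.Lock()
    s.stopped = true
    s.mu.Unlock()
}

func main() {
    s := &stopper{}
    fmt.Println("accepted:", s.RunTask(func() { fmt.Println("request executed") }))
    s.Stop()
    fmt.Println("accepted after stop:", s.RunTask(func() {}))
}
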
Example 3: Send
// Send sends a request to Cockroach via RPC. Errors which are retryable are
// retried with backoff in a loop using the default retry options. Other errors
// sending the request are retried indefinitely using the same client command
// ID to avoid reporting failure when in fact the command may have gone through
// and been executed successfully. We retry here to eventually get through with
// the same client command ID and be given the cached response.
func (s *rpcSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
    var err error
    var br roachpb.BatchResponse
    for r := retry.Start(s.retryOpts); r.Next(); {
        select {
        case <-s.client.Healthy():
        default:
            err = fmt.Errorf("failed to send RPC request %s: client is unhealthy", method)
            log.Warning(err)
            continue
        }
        if err = s.client.Call(method, &ba, &br); err != nil {
            br.Reset() // don't trust anyone.
            // Assume all errors sending the request are retryable. The actual
            // number of things that could go wrong is vast, but we don't
            // want to miss any which should in theory be retried with the
            // same client command ID. We log the error here as a warning so
            // there's visibility that this is happening. Some of the errors
            // we'll sweep up in this net shouldn't be retried, but we can't
            // really know for sure which.
            log.Warningf("failed to send RPC request %s: %s", method, err)
            continue
        }
        // On a successful post, we're done with the retry loop.
        break
    }
    if err != nil {
        return nil, roachpb.NewError(err)
    }
    pErr := br.Error
    br.Error = nil
    return &br, pErr
}
Author: mbertschler | Project: cockroach | Lines: 41 | Source: rpc_sender.go
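
The same treat-everything-as-retryable loop can be sketched with the standard library alone; sendWithRetry and its constants are illustrative, not the cockroach retry package:

package main

import (
    "errors"
    "fmt"
    "time"
)

// sendWithRetry retries a send with capped exponential backoff, treating
// every error as retryable, as the loop above does.
func sendWithRetry(call func() error, maxAttempts int, maxBackoff time.Duration) error {
    backoff := 50 * time.Millisecond
    var err error
    for attempt := 1; attempt <= maxAttempts; attempt++ {
        if err = call(); err == nil {
            return nil // successful send: done with the retry loop
        }
        fmt.Printf("attempt %d failed: %v; retrying in %s\n", attempt, err, backoff)
        time.Sleep(backoff)
        backoff *= 2
        if backoff > maxBackoff {
            backoff = maxBackoff
        }
    }
    return err
}

func main() {
    n := 0
    failTwice := func() error {
        n++
        if n < 3 {
            return errors.New("transient RPC failure")
        }
        return nil
    }
    fmt.Println("result:", sendWithRetry(failTwice, 5, time.Second))
}
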
Example 4: newTestSender
// newTestSender mocks out some of the txn coordinator sender's
// functionality. It responds to PutRequests using testPutResp.
func newTestSender(pre, post func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)) SenderFunc {
    txnKey := roachpb.Key("test-txn")
    txnID := uuid.NewV4()
    return func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
        if ba.UserPriority == 0 {
            ba.UserPriority = 1
        }
        if ba.Txn != nil && ba.Txn.ID == nil {
            ba.Txn.Key = txnKey
            ba.Txn.ID = txnID
        }
        var br *roachpb.BatchResponse
        var pErr *roachpb.Error
        if pre != nil {
            br, pErr = pre(ba)
        } else {
            br = ba.CreateReply()
        }
        if pErr != nil {
            return nil, pErr
        }
        var writing bool
        status := roachpb.PENDING
        for i, req := range ba.Requests {
            args := req.GetInner()
            if _, ok := args.(*roachpb.PutRequest); ok {
                testPutRespCopy := testPutResp
                union := &br.Responses[i] // avoid operating on a copy
                union.MustSetInner(&testPutRespCopy)
            }
            if roachpb.IsTransactionWrite(args) {
                writing = true
            }
        }
        if args, ok := ba.GetArg(roachpb.EndTransaction); ok {
            et := args.(*roachpb.EndTransactionRequest)
            writing = true
            if et.Commit {
                status = roachpb.COMMITTED
            } else {
                status = roachpb.ABORTED
            }
        }
        if ba.Txn != nil {
            txnClone := ba.Txn.Clone()
            br.Txn = &txnClone
            if pErr == nil {
                br.Txn.Writing = writing
                br.Txn.Status = status
            }
        }
        if post != nil {
            br, pErr = post(ba)
        }
        return br, pErr
    }
}
Author: petermattis | Project: cockroach | Lines: 62 | Source: txn_test.go
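
In tests, such a sender is handy because the pre and post hooks let each test inject errors or rewrite replies without a real cluster. Here is a self-contained sketch of the same hook pattern, with invented Request/Response types in place of the roachpb ones:

package main

import "fmt"

// Request, Response, and SenderFunc are illustrative stand-ins. The mock
// builds a default reply unless a pre hook supplies one, then lets an
// optional post hook rewrite the result, mirroring newTestSender above.
type (
    Request    struct{ Name string }
    Response   struct{ OK bool }
    SenderFunc func(Request) (*Response, error)
)

func newMockSender(pre, post func(Request) (*Response, error)) SenderFunc {
    return func(req Request) (*Response, error) {
        var resp *Response
        var err error
        if pre != nil {
            resp, err = pre(req)
        } else {
            resp = &Response{OK: true} // default reply, like ba.CreateReply()
        }
        if err != nil {
            return nil, err
        }
        if post != nil {
            resp, err = post(req)
        }
        return resp, err
    }
}

func main() {
    send := newMockSender(nil, func(Request) (*Response, error) {
        return &Response{OK: false}, nil // post hook overrides the reply
    })
    resp, _ := send(Request{Name: "Put"})
    fmt.Println("post hook overrode the reply:", !resp.OK)
}
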
Example 5: resendWithTxn
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
    // Run a one-off transaction with that single command.
    if log.V(1) {
        log.Infof(tc.ctx, "%s: auto-wrapping in txn and re-executing: ", ba)
    }
    // TODO(bdarnell): need to be able to pass other parts of DBContext
    // through here.
    dbCtx := client.DefaultDBContext()
    dbCtx.UserPriority = ba.UserPriority
    tmpDB := client.NewDBWithContext(tc, dbCtx)
    var br *roachpb.BatchResponse
    err := tmpDB.Txn(context.TODO(), func(txn *client.Txn) error {
        txn.SetDebugName("auto-wrap", 0)
        b := txn.NewBatch()
        b.Header = ba.Header
        for _, arg := range ba.Requests {
            req := arg.GetInner()
            b.AddRawRequest(req)
        }
        err := txn.CommitInBatch(b)
        br = b.RawResponse()
        return err
    })
    if err != nil {
        return nil, roachpb.NewError(err)
    }
    br.Txn = nil // hide the evidence
    return br, nil
}
Author: yaojingguo | Project: cockroach | Lines: 33 | Source: txn_coord_sender.go
Example 6: executeCmd
// executeCmd interprets the given message as a *roachpb.BatchRequest and sends it
// via the local sender.
func (s *DBServer) executeCmd(argsI proto.Message) (proto.Message, error) {
    var br *roachpb.BatchResponse
    var err error
    f := func() {
        ba := argsI.(*roachpb.BatchRequest)
        if err = verifyRequest(ba); err != nil {
            return
        }
        var pErr *roachpb.Error
        br, pErr = s.sender.Send(context.TODO(), *ba)
        if pErr != nil {
            br = &roachpb.BatchResponse{}
        }
        if br.Error != nil {
            panic(roachpb.ErrorUnexpectedlySet(s.sender, br))
        }
        br.Error = pErr
    }
    if !s.stopper.RunTask(f) {
        err = util.Errorf("node stopped")
    }
    return br, err
}
Author: liugangnhm | Project: cockroach | Lines: 27 | Source: db.go
Example 7: newTestSender
// newTestSender mocks out some of the txn coordinator sender's
// functionality. It responds to PutRequests using testPutResp.
func newTestSender(pre, post func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)) SenderFunc {
    txnKey := roachpb.Key("test-txn")
    txnID := []byte(uuid.NewUUID4())
    return func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
        ba.UserPriority = 1
        if ba.Txn != nil && len(ba.Txn.ID) == 0 {
            ba.Txn.Key = txnKey
            ba.Txn.ID = txnID
        }
        var br *roachpb.BatchResponse
        var pErr *roachpb.Error
        if pre != nil {
            br, pErr = pre(ba)
        } else {
            br = ba.CreateReply()
        }
        if pErr != nil {
            return nil, pErr
        }
        var writing bool
        status := roachpb.PENDING
        for i, req := range ba.Requests {
            args := req.GetInner()
            if _, ok := args.(*roachpb.PutRequest); ok {
                if !br.Responses[i].SetValue(proto.Clone(testPutResp).(roachpb.Response)) {
                    panic("failed to set put response")
                }
            }
            if roachpb.IsTransactionWrite(args) {
                writing = true
            }
        }
        if args, ok := ba.GetArg(roachpb.EndTransaction); ok {
            et := args.(*roachpb.EndTransactionRequest)
            writing = true
            if et.Commit {
                status = roachpb.COMMITTED
            } else {
                status = roachpb.ABORTED
            }
        }
        if ba.Txn != nil {
            txnClone := ba.Txn.Clone()
            br.Txn = &txnClone
            if pErr == nil {
                br.Txn.Writing = writing
                br.Txn.Status = status
            }
        }
        if post != nil {
            br, pErr = post(ba)
        }
        return br, pErr
    }
}
Author: JackKrupansky | Project: cockroach | Lines: 60 | Source: txn_test.go
Example 8: executeCmd
// executeCmd interprets the given message as a *roachpb.BatchRequest and sends it
// via the local sender.
func (n *Node) executeCmd(argsI proto.Message) (proto.Message, error) {
    ba := argsI.(*roachpb.BatchRequest)
    var br *roachpb.BatchResponse
    opName := "node " + strconv.Itoa(int(n.Descriptor.NodeID)) // could save allocs here
    fail := func(err error) {
        br = &roachpb.BatchResponse{}
        br.Error = roachpb.NewError(err)
    }
    f := func() {
        sp, err := tracing.JoinOrNew(n.ctx.Tracer, ba.Trace, opName)
        if err != nil {
            fail(err)
            return
        }
        // If this is a snowball span, it gets special treatment: It skips the
        // regular tracing machinery, and we instead send the collected spans
        // back with the response. This is more expensive, but then again,
        // those are individual requests traced by users, so they can be.
        if sp.BaggageItem(tracing.Snowball) != "" {
            if sp, err = tracing.JoinOrNewSnowball(opName, ba.Trace, func(rawSpan basictracer.RawSpan) {
                encSp, err := tracing.EncodeRawSpan(&rawSpan, nil)
                if err != nil {
                    log.Warning(err)
                }
                br.CollectedSpans = append(br.CollectedSpans, encSp)
            }); err != nil {
                fail(err)
                return
            }
        }
        defer sp.Finish()
        ctx := opentracing.ContextWithSpan((*Node)(n).context(), sp)
        tStart := time.Now()
        var pErr *roachpb.Error
        br, pErr = n.stores.Send(ctx, *ba)
        if pErr != nil {
            br = &roachpb.BatchResponse{}
            sp.LogEvent(fmt.Sprintf("error: %T", pErr.GetDetail()))
        }
        if br.Error != nil {
            panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
        }
        n.metrics.callComplete(time.Now().Sub(tStart), pErr)
        br.Error = pErr
    }
    if !n.stopper.RunTask(f) {
        return nil, util.Errorf("node %d stopped", n.Descriptor.NodeID)
    }
    return br, nil
}
Author: liugangnhm | Project: cockroach | Lines: 56 | Source: node.go
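
The snowball mechanism boils down to threading a collector through the request path so that spans recorded anywhere below can be shipped back in BatchResponse.CollectedSpans. As a loose, standard-library-only analogy (traceKey, collector, and record are invented for illustration, not the cockroach tracing package):

package main

import (
    "context"
    "fmt"
)

// A request marked for full tracing carries a collector in its context;
// handlers that see it append span records, which are later returned to
// the client alongside the response.
type traceKey struct{}

type rawSpan struct{ Op string }

type collector struct{ spans []rawSpan }

func withSnowball(ctx context.Context, c *collector) context.Context {
    return context.WithValue(ctx, traceKey{}, c)
}

func record(ctx context.Context, op string) {
    if c, ok := ctx.Value(traceKey{}).(*collector); ok {
        c.spans = append(c.spans, rawSpan{Op: op}) // shipped back with the reply
    }
}

func main() {
    c := &collector{}
    ctx := withSnowball(context.Background(), c)
    record(ctx, "node: stores.Send")
    record(ctx, "replica: evaluate")
    fmt.Printf("returning %d collected spans to the client\n", len(c.spans))
}
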
Example 9: newTestSender
func newTestSender(pre, post func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)) SenderFunc {
    txnKey := roachpb.Key("test-txn")
    txnID := []byte(uuid.NewUUID4())
    return func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
        ba.UserPriority = proto.Int32(-1)
        if ba.Txn != nil && len(ba.Txn.ID) == 0 {
            ba.Txn.Key = txnKey
            ba.Txn.ID = txnID
        }
        var br *roachpb.BatchResponse
        var pErr *roachpb.Error
        if pre != nil {
            br, pErr = pre(ba)
        } else {
            br = ba.CreateReply()
        }
        if pErr != nil {
            return nil, pErr
        }
        var writing bool
        status := roachpb.PENDING
        if _, ok := ba.GetArg(roachpb.Put); ok {
            br.Add(proto.Clone(testPutResp).(roachpb.Response))
            writing = true
        }
        if args, ok := ba.GetArg(roachpb.EndTransaction); ok {
            et := args.(*roachpb.EndTransactionRequest)
            writing = true
            if et.Commit {
                status = roachpb.COMMITTED
            } else {
                status = roachpb.ABORTED
            }
        }
        br.Txn = proto.Clone(ba.Txn).(*roachpb.Transaction)
        if br.Txn != nil && pErr == nil {
            br.Txn.Writing = writing
            br.Txn.Status = status
        }
        if post != nil {
            br, pErr = post(ba)
        }
        return br, pErr
    }
}
Author: nporsche | Project: cockroach | Lines: 48 | Source: txn_test.go
Example 10: Batch
// Batch implements the roachpb.KVServer interface.
func (s *DBServer) Batch(ctx context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, error) {
    // TODO(marc): this code is duplicated in server/node.go, which should be
    // fixed. Also, grpc's authentication model (which gives credential access in
    // the request handler) doesn't really fit with the current design of the
    // security package (which assumes that TLS state is only given at connection
    // time) - that should be fixed.
    if peer, ok := peer.FromContext(ctx); ok {
        if tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo); ok {
            certUser, err := security.GetCertificateUser(&tlsInfo.State)
            if err != nil {
                return nil, err
            }
            if certUser != security.NodeUser {
                return nil, util.Errorf("user %s is not allowed", certUser)
            }
        }
    }
    var br *roachpb.BatchResponse
    var err error
    f := func() {
        if err = verifyRequest(args); err != nil {
            return
        }
        var pErr *roachpb.Error
        br, pErr = s.sender.Send(context.TODO(), *args)
        if pErr != nil {
            br = &roachpb.BatchResponse{}
        }
        if br.Error != nil {
            panic(roachpb.ErrorUnexpectedlySet(s.sender, br))
        }
        br.Error = pErr
    }
    if !s.stopper.RunTask(f) {
        err = util.Errorf("node stopped")
    }
    return br, err
}
Author: GitGoldie | Project: cockroach | Lines: 42 | Source: db.go
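
The peer-certificate check above can be reproduced with the grpc peer and credentials packages directly; only security.GetCertificateUser is CockroachDB-internal. A sketch under that assumption, reading the user name from the client certificate's Common Name instead:

package main

import (
    "context"
    "errors"
    "fmt"

    "google.golang.org/grpc/credentials"
    "google.golang.org/grpc/peer"
)

// certUserFromContext pulls the TLS state out of the gRPC peer and treats
// the first client certificate's Common Name as the user, standing in for
// the internal security.GetCertificateUser helper.
func certUserFromContext(ctx context.Context) (string, error) {
    p, ok := peer.FromContext(ctx)
    if !ok {
        return "", errors.New("no peer in context")
    }
    tlsInfo, ok := p.AuthInfo.(credentials.TLSInfo)
    if !ok {
        return "", errors.New("connection is not using TLS")
    }
    certs := tlsInfo.State.PeerCertificates
    if len(certs) == 0 {
        return "", errors.New("no client certificate presented")
    }
    return certs[0].Subject.CommonName, nil
}

func main() {
    // Outside a gRPC handler there is no peer, so this reports an error.
    _, err := certUserFromContext(context.Background())
    fmt.Println(err)
}
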
Example 11: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc). The parameters and return values
// correspond to client.Sender with the exception of the returned boolean,
// which is true to indicate that the caller should retry but needs to send
// EndTransaction in a separate request.
func (ds *DistSender) sendChunk(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error, bool) {
    isReverse := ba.IsReverse()
    // TODO(radu): when contexts are properly plumbed, we should be able to get
    // the tracer from ctx, not from the DistSender.
    ctx, cleanup := tracing.EnsureContext(ctx, tracing.TracerFromCtx(ds.Ctx))
    defer cleanup()
    // The minimal key range encompassing all requests contained within.
    // Local addressing has already been resolved.
    // TODO(tschottdorf): consider rudimentary validation of the batch here
    // (for example, non-range requests with EndKey, or empty key ranges).
    rs, err := keys.Range(ba)
    if err != nil {
        return nil, roachpb.NewError(err), false
    }
    var br *roachpb.BatchResponse
    // Send the request to one range per iteration.
    for {
        // Increase the sequence counter only once before sending RPCs to
        // the ranges involved in this chunk of the batch (as opposed to for
        // each RPC individually). On RPC errors, there's no guarantee that
        // the request hasn't made its way to the target regardless of the
        // error; we'd like the second execution to be caught by the sequence
        // cache if that happens. There is a small chance that we address
        // a range twice in this chunk (stale/suboptimal descriptors due to
        // splits/merges), which leads to a transaction retry.
        // TODO(tschottdorf): it's possible that if we don't evict from the
        // cache we could be in for a busy loop.
        ba.SetNewRequest()
        var curReply *roachpb.BatchResponse
        var desc *roachpb.RangeDescriptor
        var evictToken *evictionToken
        var needAnother bool
        var pErr *roachpb.Error
        var finished bool
        var numAttempts int
        for r := retry.StartWithCtx(ctx, ds.rpcRetryOptions); r.Next(); {
            numAttempts++
            {
                const magicLogCurAttempt = 20
                var seq int32
                if ba.Txn != nil {
                    seq = ba.Txn.Sequence
                }
                if numAttempts%magicLogCurAttempt == 0 || seq%magicLogCurAttempt == 0 {
                    // Log a message if a request appears to get stuck for a long
                    // time or, potentially, forever. See #8975.
                    // The local counter captures this loop here; the Sequence number
                    // should capture anything higher up (as it needs to be
                    // incremented every time this method is called).
                    log.Warningf(
                        ctx,
                        "%d retries for an RPC at sequence %d, last error was: %s, remaining key ranges %s: %s",
                        numAttempts, seq, pErr, rs, ba,
                    )
                }
            }
            // Get range descriptor (or, when spanning range, descriptors). Our
            // error handling below may clear them on certain errors, so we
            // refresh (likely from the cache) on every retry.
            log.Trace(ctx, "meta descriptor lookup")
            var err error
            desc, needAnother, evictToken, err = ds.getDescriptors(ctx, rs, evictToken, isReverse)
            // getDescriptors may fail retryably if, for example, the first
            // range isn't available via Gossip. Assume that all errors at
            // this level are retryable. Non-retryable errors would be for
            // things like malformed requests which we should have checked
            // for before reaching this point.
            if err != nil {
                log.Trace(ctx, "range descriptor lookup failed: "+err.Error())
                if log.V(1) {
                    log.Warning(ctx, err)
                }
                pErr = roachpb.NewError(err)
                continue
            }
            if needAnother && br == nil {
                // TODO(tschottdorf): we should have a mechanism for discovering
                // range merges (descriptor staleness will mostly go unnoticed),
                // or we'll be turning single-range queries into multi-range
                // queries for no good reason.
                // If there's no transaction and the op spans ranges, possibly
                // re-run as part of a transaction for consistency. The
                // case where we don't need to re-run is if the read
                // consistency is not required.
                if ba.Txn == nil && ba.IsPossibleTransaction() &&
                    // ... (rest of the function omitted) ...
Author: yaojingguo | Project: cockroach | Lines: 101 | Source: dist_sender.go
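
The third return value drives batch splitting: when a batch spans ranges and its last request is EndTransaction, sendChunk declines to attempt a one-phase commit and asks the caller to resend the EndTransaction alone. A toy sketch of that contract, where request and splitEndTxn are illustrative stand-ins:

package main

import "fmt"

type request struct{ method string }

// splitEndTxn mirrors the shouldSplitET check: a multi-request batch that
// spans ranges and ends with EndTransaction gets its commit peeled off so
// it can be sent in a separate call.
func splitEndTxn(reqs []request, spansRanges bool) (body, commit []request, split bool) {
    if l := len(reqs) - 1; spansRanges && l > 0 && reqs[l].method == "EndTransaction" {
        return reqs[:l], reqs[l:], true
    }
    return reqs, nil, false
}

func main() {
    reqs := []request{{"Put"}, {"Put"}, {"EndTransaction"}}
    body, commit, split := splitEndTxn(reqs, true)
    fmt.Println(len(body), len(commit), split) // prints: 2 1 true
}
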
Example 12: updateState
// updateState updates the transaction state in both the success and
// error cases, applying those updates to the corresponding txnMeta
// object when adequate. It also updates certain errors with the
// updated transaction for use by client restarts.
func (tc *TxnCoordSender) updateState(
    startNS int64, ctx context.Context, ba roachpb.BatchRequest,
    br *roachpb.BatchResponse, pErr *roachpb.Error) *roachpb.Error {
    newTxn := &roachpb.Transaction{}
    newTxn.Update(ba.Txn)
    if pErr == nil {
        newTxn.Update(br.Txn)
    } else {
        newTxn.Update(pErr.GetTxn())
    }
    switch t := pErr.GetDetail().(type) {
    case *roachpb.TransactionStatusError:
        // Likely already committed or more obscure errors such as epoch or
        // timestamp regressions; consider txn dead.
        defer tc.cleanupTxn(ctx, *pErr.GetTxn())
    case *roachpb.OpRequiresTxnError:
        panic("OpRequiresTxnError must not happen at this level")
    case *roachpb.ReadWithinUncertaintyIntervalError:
        // If the reader encountered a newer write within the uncertainty
        // interval, we advance the txn's timestamp just past the last observed
        // timestamp from the node.
        restartTS, ok := newTxn.GetObservedTimestamp(pErr.OriginNode)
        if !ok {
            pErr = roachpb.NewError(util.Errorf("no observed timestamp for node %d found on uncertainty restart", pErr.OriginNode))
        } else {
            newTxn.Timestamp.Forward(restartTS)
            newTxn.Restart(ba.UserPriority, newTxn.Priority, newTxn.Timestamp)
        }
    case *roachpb.TransactionAbortedError:
        // Increase timestamp if applicable.
        newTxn.Timestamp.Forward(pErr.GetTxn().Timestamp)
        newTxn.Priority = pErr.GetTxn().Priority
        // Clean up the freshly aborted transaction in defer(), avoiding a
        // race with the state update below.
        defer tc.cleanupTxn(ctx, *newTxn)
    case *roachpb.TransactionPushError:
        // Increase timestamp if applicable, ensuring that we're
        // just ahead of the pushee.
        newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp)
        newTxn.Restart(ba.UserPriority, t.PusheeTxn.Priority-1, newTxn.Timestamp)
    case *roachpb.TransactionRetryError:
        // Increase timestamp so on restart, we're ahead of any timestamp
        // cache entries or newer versions which caused the restart.
        newTxn.Restart(ba.UserPriority, pErr.GetTxn().Priority, newTxn.Timestamp)
    case *roachpb.WriteTooOldError:
        newTxn.Restart(ba.UserPriority, newTxn.Priority, t.ActualTimestamp)
    case nil:
        // Nothing to do here, avoid the default case.
    default:
        if pErr.GetTxn() != nil {
            if pErr.CanRetry() {
                panic("Retryable internal error must not happen at this level")
            } else {
                // Do not clean up the transaction here since the client might still
                // want to continue the transaction. For example, a client might
                // continue its transaction after receiving ConditionFailedError, which
                // can come from a unique index violation.
            }
        }
    }
    if pErr != nil && pErr.GetTxn() != nil {
        // Avoid changing existing errors because sometimes they escape into
        // goroutines and then there are races. Fairly sure there isn't one
        // here, but better safe than sorry.
        pErrShallow := *pErr
        pErrShallow.SetTxn(newTxn)
        pErr = &pErrShallow
    }
    if newTxn.ID == nil {
        return pErr
    }
    txnID := *newTxn.ID
    tc.Lock()
    defer tc.Unlock()
    txnMeta := tc.txns[txnID]
    // For successful transactional requests, keep the written intents and
    // the updated transaction record to be sent along with the reply.
    // The transaction metadata is created with the first writing operation.
    // A tricky edge case is that of a transaction which "fails" on the
    // first writing request, but actually manages to write some intents
    // (for example, due to being multi-range). In this case, there will
    // be an error, but the transaction will be marked as Writing and the
    // coordinator must track the state, for the client's retry will be
    // performed with a Writing transaction which the coordinator rejects
    // unless it is tracking it (on top of it making sense to track it;
    // after all, it **has** laid down intents and only the coordinator
    // can augment a potential EndTransaction call). See #3303.
    var intentGroup interval.RangeGroup
    if txnMeta != nil {
        intentGroup = txnMeta.keys
    } else if pErr == nil || newTxn.Writing {
        intentGroup = interval.NewRangeTree()
    }
    // ... (rest of the function omitted) ...
Author: petermattis | Project: cockroach | Lines: 101 | Source: txn_coord_sender.go
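
The heart of updateState is a type switch over the error detail: each retryable error adjusts the transaction's restart timestamp and priority differently before control returns to the client. A stripped-down sketch of that shape, with hypothetical error types standing in for the roachpb details:

package main

import (
    "errors"
    "fmt"
)

// retryError and abortedError are invented stand-ins; the switch mirrors
// how updateState dispatches on the concrete error type.
type (
    retryError   struct{ priority int32 }
    abortedError struct{}
)

func (retryError) Error() string   { return "txn retry" }
func (abortedError) Error() string { return "txn aborted" }

func updateOnError(err error) string {
    switch t := err.(type) {
    case nil:
        return "success: merge response txn into local state"
    case retryError:
        return fmt.Sprintf("restart with priority %d", t.priority)
    case abortedError:
        return "defer cleanup, then restart from scratch"
    default:
        return "pass the error through unchanged"
    }
}

func main() {
    fmt.Println(updateOnError(nil))
    fmt.Println(updateOnError(retryError{priority: 3}))
    fmt.Println(updateOnError(abortedError{}))
    fmt.Println(updateOnError(errors.New("boom")))
}
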
Example 13: Batch
// Batch implements the roachpb.KVServer interface.
func (n *Node) Batch(ctx context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, error) {
    // TODO(marc): this code is duplicated in kv/db.go, which should be fixed.
    // Also, grpc's authentication model (which gives credential access in the
    // request handler) doesn't really fit with the current design of the
    // security package (which assumes that TLS state is only given at connection
    // time) - that should be fixed.
    if peer, ok := peer.FromContext(ctx); ok {
        if tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo); ok {
            certUser, err := security.GetCertificateUser(&tlsInfo.State)
            if err != nil {
                return nil, err
            }
            if certUser != security.NodeUser {
                return nil, util.Errorf("user %s is not allowed", certUser)
            }
        }
    }
    var br *roachpb.BatchResponse
    opName := "node " + strconv.Itoa(int(n.Descriptor.NodeID)) // could save allocs here
    fail := func(err error) {
        br = &roachpb.BatchResponse{}
        br.Error = roachpb.NewError(err)
    }
    f := func() {
        sp, err := tracing.JoinOrNew(n.ctx.Tracer, args.Trace, opName)
        if err != nil {
            fail(err)
            return
        }
        // If this is a snowball span, it gets special treatment: It skips the
        // regular tracing machinery, and we instead send the collected spans
        // back with the response. This is more expensive, but then again,
        // those are individual requests traced by users, so they can be.
        if sp.BaggageItem(tracing.Snowball) != "" {
            sp.LogEvent("delegating to snowball tracing")
            sp.Finish()
            if sp, err = tracing.JoinOrNewSnowball(opName, args.Trace, func(rawSpan basictracer.RawSpan) {
                encSp, err := tracing.EncodeRawSpan(&rawSpan, nil)
                if err != nil {
                    log.Warning(err)
                }
                br.CollectedSpans = append(br.CollectedSpans, encSp)
            }); err != nil {
                fail(err)
                return
            }
        }
        defer sp.Finish()
        traceCtx := opentracing.ContextWithSpan(n.context(ctx), sp)
        tStart := timeutil.Now()
        var pErr *roachpb.Error
        br, pErr = n.stores.Send(traceCtx, *args)
        if pErr != nil {
            br = &roachpb.BatchResponse{}
            log.Trace(traceCtx, fmt.Sprintf("error: %T", pErr.GetDetail()))
        }
        if br.Error != nil {
            panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
        }
        n.metrics.callComplete(timeutil.Since(tStart), pErr)
        br.Error = pErr
    }
    if !n.stopper.RunTask(f) {
        return nil, util.Errorf("node %d stopped", n.Descriptor.NodeID)
    }
    return br, nil
}
Author: GitGoldie | Project: cockroach | Lines: 73 | Source: node.go
Example 14: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc). The parameters and return values
// correspond to client.Sender with the exception of the returned boolean,
// which is true to indicate that the caller should retry but needs to send
// EndTransaction in a separate request.
func (ds *DistSender) sendChunk(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error, bool) {
    isReverse := ba.IsReverse()
    ctx, cleanup := tracing.EnsureContext(ctx, ds.Tracer)
    defer cleanup()
    // The minimal key range encompassing all requests contained within.
    // Local addressing has already been resolved.
    // TODO(tschottdorf): consider rudimentary validation of the batch here
    // (for example, non-range requests with EndKey, or empty key ranges).
    rs, err := keys.Range(ba)
    if err != nil {
        return nil, roachpb.NewError(err), false
    }
    var br *roachpb.BatchResponse
    // Send the request to one range per iteration.
    for {
        // Increase the sequence counter only once before sending RPCs to
        // the ranges involved in this chunk of the batch (as opposed to for
        // each RPC individually). On RPC errors, there's no guarantee that
        // the request hasn't made its way to the target regardless of the
        // error; we'd like the second execution to be caught by the sequence
        // cache if that happens. There is a small chance that we address
        // a range twice in this chunk (stale/suboptimal descriptors due to
        // splits/merges), which leads to a transaction retry.
        // TODO(tschottdorf): it's possible that if we don't evict from the
        // cache we could be in for a busy loop.
        ba.SetNewRequest()
        var curReply *roachpb.BatchResponse
        var desc *roachpb.RangeDescriptor
        var evictToken evictionToken
        var needAnother bool
        var pErr *roachpb.Error
        var finished bool
        for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
            // Get range descriptor (or, when spanning range, descriptors). Our
            // error handling below may clear them on certain errors, so we
            // refresh (likely from the cache) on every retry.
            log.Trace(ctx, "meta descriptor lookup")
            desc, needAnother, evictToken, pErr = ds.getDescriptors(rs, evictToken, isReverse)
            // getDescriptors may fail retryably if the first range isn't
            // available via Gossip.
            if pErr != nil {
                log.Trace(ctx, "range descriptor lookup failed: "+pErr.String())
                if pErr.Retryable {
                    if log.V(1) {
                        log.Warning(pErr)
                    }
                    continue
                }
                break
            } else {
                log.Trace(ctx, "looked up range descriptor")
            }
            if needAnother && br == nil {
                // TODO(tschottdorf): we should have a mechanism for discovering
                // range merges (descriptor staleness will mostly go unnoticed),
                // or we'll be turning single-range queries into multi-range
                // queries for no good reason.
                // If there's no transaction and the op spans ranges, possibly
                // re-run as part of a transaction for consistency. The
                // case where we don't need to re-run is if the read
                // consistency is not required.
                if ba.Txn == nil && ba.IsPossibleTransaction() &&
                    ba.ReadConsistency != roachpb.INCONSISTENT {
                    return nil, roachpb.NewError(&roachpb.OpRequiresTxnError{}), false
                }
                // If the batch contains more than just EndTransaction but ends
                // with it, we want the caller to come again with the
                // EndTransaction in an extra call.
                if l := len(ba.Requests) - 1; l > 0 && ba.Requests[l].GetInner().Method() == roachpb.EndTransaction {
                    return nil, roachpb.NewError(errors.New("cannot send 1PC txn to multiple ranges")), true /* shouldSplitET */
                }
            }
            // It's possible that the returned descriptor misses parts of the
            // keys it's supposed to scan after it's truncated to match the
            // descriptor. Example: revscan [a,g), first desc lookup for "g"
            // returns descriptor [c,d) -> [d,g) is never scanned.
            // We evict and retry in such a case.
            includesFrontOfCurSpan := func(rd *roachpb.RangeDescriptor) bool {
                if isReverse {
                    // This approach is needed because rs.EndKey is exclusive.
                    return desc.ContainsKeyRange(desc.StartKey, rs.EndKey)
                }
                return desc.ContainsKey(rs.Key)
            }
            if !includesFrontOfCurSpan(desc) {
                if err := evictToken.Evict(); err != nil {
                    // ... (rest of the function omitted) ...
Author: chzyer-dev | Project: cockroach | Lines: 101 | Source: dist_sender.go
Example 15: updateState
// updateState updates the transaction state in both the success and
// error cases, applying those updates to the corresponding txnMeta
// object when adequate. It also updates certain errors with the
// updated transaction for use by client restarts.
func (tc *TxnCoordSender) updateState(ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse, pErr *roachpb.Error) *roachpb.Error {
    trace := tracer.FromCtx(ctx)
    newTxn := &roachpb.Transaction{}
    newTxn.Update(ba.GetTxn())
    err := pErr.GoError()
    switch t := err.(type) {
    case nil:
        newTxn.Update(br.GetTxn())
        // Move txn timestamp forward to response timestamp if applicable.
        // TODO(tschottdorf): see (*Replica).executeBatch and comments within.
        // Looks like this isn't necessary any more, nor did it prevent a bug
        // referenced in a TODO there.
        newTxn.Timestamp.Forward(br.Timestamp)
    case *roachpb.TransactionStatusError:
        // Likely already committed or more obscure errors such as epoch or
        // timestamp regressions; consider txn dead.
        defer tc.cleanupTxn(trace, t.Txn)
    case *roachpb.OpRequiresTxnError:
        // TODO(tschottdorf): range-spanning autowrap currently broken.
        panic("TODO(tschottdorf): disabled")
    case *roachpb.ReadWithinUncertaintyIntervalError:
        // Mark the host as certain. See the protobuf comment for
        // Transaction.CertainNodes for details.
        if t.NodeID == 0 {
            panic("no replica set in header on uncertainty restart")
        }
        newTxn.CertainNodes.Add(t.NodeID)
        // If the reader encountered a newer write within the uncertainty
        // interval, move the timestamp forward, just past that write or
        // up to MaxTimestamp, whichever comes first.
        candidateTS := newTxn.MaxTimestamp
        candidateTS.Backward(t.ExistingTimestamp.Add(0, 1))
        newTxn.Timestamp.Forward(candidateTS)
        newTxn.Restart(ba.GetUserPriority(), newTxn.Priority, newTxn.Timestamp)
        t.Txn = *newTxn
    case *roachpb.TransactionAbortedError:
        // Increase timestamp if applicable.
        newTxn.Timestamp.Forward(t.Txn.Timestamp)
        newTxn.Priority = t.Txn.Priority
        t.Txn = *newTxn
        // Clean up the freshly aborted transaction in defer(), avoiding a
        // race with the state update below.
        defer tc.cleanupTxn(trace, t.Txn)
    case *roachpb.TransactionPushError:
        // Increase timestamp if applicable, ensuring that we're
        // just ahead of the pushee.
        newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp.Add(0, 1))
        newTxn.Restart(ba.GetUserPriority(), t.PusheeTxn.Priority-1, newTxn.Timestamp)
        t.Txn = newTxn
    case *roachpb.TransactionRetryError:
        // Increase timestamp if applicable.
        newTxn.Timestamp.Forward(t.Txn.Timestamp)
        newTxn.Restart(ba.GetUserPriority(), t.Txn.Priority, newTxn.Timestamp)
        t.Txn = *newTxn
    case roachpb.TransactionRestartError:
        // Assertion: The above cases should exhaust all ErrorDetails which
        // carry a Transaction.
        if pErr.Detail != nil {
            panic(fmt.Sprintf("unhandled TransactionRestartError %T", err))
        }
    }
    return func() *roachpb.Error {
        if len(newTxn.ID) <= 0 {
            return pErr
        }
        id := string(newTxn.ID)
        tc.Lock()
        defer tc.Unlock()
        txnMeta := tc.txns[id]
        // For successful transactional requests, keep the written intents and
        // the updated transaction record to be sent along with the reply.
        // The transaction metadata is created with the first writing operation.
        // TODO(tschottdorf): already computed the intents prior to sending,
        // consider re-using those.
        if intents := ba.GetIntents(); len(intents) > 0 && err == nil {
            if txnMeta == nil {
                newTxn.Writing = true
                txnMeta = &txnMetadata{
                    txn:              *newTxn,
                    keys:             cache.NewIntervalCache(cache.Config{Policy: cache.CacheNone}),
                    firstUpdateNanos: tc.clock.PhysicalNow(),
                    lastUpdateNanos:  tc.clock.PhysicalNow(),
                    timeoutDuration:  tc.clientTimeout,
                    txnEnd:           make(chan struct{}),
                }
                tc.txns[id] = txnMeta
                // If the transaction is already over, there's no point in
                // launching a one-off coordinator which will shut down right
                // away.
                if _, isEnding := ba.GetArg(roachpb.EndTransaction); !isEnding {
                    trace.Event("coordinator spawns")
                    if !tc.stopper.RunAsyncTask(func() {
                        tc.heartbeatLoop(id)
                    }) {
                        // The system is already draining and we can't start the
                        // ... (rest of the function omitted) ...
Author: rohanahata | Project: cockroach | Lines: 101 | Source: txn_coord_sender.go
Example 16: updateState
// updateState updates the transaction state in both the success and
// error cases, applying those updates to the corresponding txnMeta
// object when adequate. It also updates certain errors with the
// updated transaction for use by client restarts.
func (tc *TxnCoordSender) updateState(ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse, pErr *roachpb.Error) *roachpb.Error {
    trace := tracer.FromCtx(ctx)
    newTxn := &roachpb.Transaction{}
    newTxn.Update(ba.GetTxn())
    // TODO(tamird): remove this clone. It's currently needed to avoid race conditions.
    pErr = proto.Clone(pErr).(*roachpb.Error)
    err := pErr.GoError()
    // TODO(bdarnell): We're writing to errors here (and where using ErrorWithIndex);
    // since there's no concept of ownership copy-on-write is always preferable.
    switch t := err.(type) {
    case nil:
        newTxn.Update(br.Txn)
        // Move txn timestamp forward to response timestamp if applicable.
        // TODO(tschottdorf): see (*Replica).executeBatch and comments within.
        // Looks like this isn't necessary any more, nor did it prevent a bug
        // referenced in a TODO there.
        newTxn.Timestamp.Forward(br.Timestamp)
    case *roachpb.TransactionStatusError:
        // Likely already committed or more obscure errors such as epoch or
        // timestamp regressions; consider txn dead.
        defer tc.cleanupTxn(trace, t.Txn)
    case *roachpb.OpRequiresTxnError:
        panic("OpRequiresTxnError must not happen at this level")
    case *roachpb.ReadWithinUncertaintyIntervalError:
        // Mark the host as certain. See the protobuf comment for
        // Transaction.CertainNodes for details.
        if t.NodeID == 0 {
            panic("no replica set in header on uncertainty restart")
        }
        newTxn.Update(&t.Txn)
        newTxn.CertainNodes.Add(t.NodeID)
        // If the reader encountered a newer write within the uncertainty
        // interval, move the timestamp forward, just past that write or
        // up to MaxTimestamp, whichever comes first.
        candidateTS := newTxn.MaxTimestamp
        candidateTS.Backward(t.ExistingTimestamp.Add(0, 1))
        newTxn.Timestamp.Forward(candidateTS)
        newTxn.Restart(ba.GetUserPriority(), newTxn.Priority, newTxn.Timestamp)
        t.Txn = *newTxn
    case *roachpb.TransactionAbortedError:
        trace.SetError()
        newTxn.Update(&t.Txn)
        // Increase timestamp if applicable.
        newTxn.Timestamp.Forward(t.Txn.Timestamp)
        newTxn.Priority = t.Txn.Priority
        t.Txn = *newTxn
        // Clean up the freshly aborted transaction in defer(), avoiding a
        // race with the state update below.
        defer tc.cleanupTxn(trace, t.Txn)
    case *roachpb.TransactionPushError:
        newTxn.Update(t.Txn)
        // Increase timestamp if applicable, ensuring that we're
        // just ahead of the pushee.
        newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp.Add(0, 1))
        newTxn.Restart(ba.GetUserPriority(), t.PusheeTxn.Priority-1, newTxn.Timestamp)
        t.Txn = newTxn
    case *roachpb.TransactionRetryError:
        newTxn.Update(&t.Txn)
        newTxn.Restart(ba.GetUserPriority(), t.Txn.Priority, newTxn.Timestamp)
        t.Txn = *newTxn
    case roachpb.TransactionRestartError:
        // Assertion: The above cases should exhaust all ErrorDetails which
        // carry a Transaction.
        if pErr.Detail != nil {
            panic(fmt.Sprintf("unhandled TransactionRestartError %T", err))
        }
    default:
        trace.SetError()
    }
    return func() *roachpb.Error {
        if len(newTxn.ID) <= 0 {
            return pErr
        }
        id := string(newTxn.ID)
        tc.Lock()
        defer tc.Unlock()
        txnMeta := tc.txns[id]
        // For successful transactional requests, keep the written intents and
        // the updated transaction record to be sent along with the reply.
        // The transaction metadata is created with the first writing operation.
        // A tricky edge case is that of a transaction which "fails" on the
        // first writing request, but actually manages to write some intents
        // (for example, due to being multi-range). In this case, there will
        // be an error, but the transaction will be marked as Writing and the
        // coordinator must track the state, for the client's retry will be
        // performed with a Writing transaction which the coordinator rejects
        // unless it is tracking it (on top of it making sense to track it;
        // after all, it **has** laid down intents and only the coordinator
        // can augment a potential EndTransaction call).
        // TODO(tschottdorf): already computed the intents prior to sending,
        // consider re-using those.
        if intents := ba.GetIntents(); len(intents) > 0 && (err == nil || newTxn.Writing) {
            if txnMeta == nil {
                if !newTxn.Writing {
                    panic("txn with intents marked as non-writing")
                }
                // ... (rest of the function omitted) ...
Author: rissoa | Project: cockroach | Lines: 101 | Source: txn_coord_sender.go
Example 17: sendChunk
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc). The parameters and return values
// correspond to client.Sender with the exception of the returned boolean,
// which is true to indicate that the caller should retry but needs to send
// EndTransaction in a separate request.
func (ds *DistSender) sendChunk(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error, bool) {
    isReverse := ba.IsReverse()
    trace := tracer.FromCtx(ctx)
    // The minimal key range encompassing all requests contained within.
    // Local addressing has already been resolved.
    // TODO(tschottdorf): consider rudimentary validation of the batch here
    // (for example, non-range requests with EndKey, or empty key ranges).
    rs := keys.Range(ba)
    var br *roachpb.BatchResponse
    // Send the request to one range per iteration.
    for {
        considerIntents := false
        var curReply *roachpb.BatchResponse
        var desc *roachpb.RangeDescriptor
        var needAnother bool
        var pErr *roachpb.Error
        for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
            // Get range descriptor (or, when spanning range, descriptors). Our
            // error handling below may clear them on certain errors, so we
            // refresh (likely from the cache) on every retry.
            descDone := trace.Epoch("meta descriptor lookup")
            var evictDesc func()
            desc, needAnother, evictDesc, pErr = ds.getDescriptors(rs, considerIntents, isReverse)
            descDone()
            // getDescriptors may fail retryably if the first range isn't
            // available via Gossip.
            if pErr != nil {
                if pErr.Retryable {
                    if log.V(1) {
                        log.Warning(pErr)
                    }
                    continue
                }
                break
            }
            if needAnother && br == nil {
                // TODO(tschottdorf): we should have a mechanism for discovering
                // range merges (descriptor staleness will mostly go unnoticed),
                // or we'll be turning single-range queries into multi-range
                // queries for no good reason.
                // If there's no transaction and the op spans ranges, possibly
                // re-run as part of a transaction for consistency. The
                // case where we don't need to re-run is if the read
                // consistency is not required.
                if ba.Txn == nil && ba.IsPossibleTransaction() &&
                    ba.ReadConsistency != roachpb.INCONSISTENT {
                    return nil, roachpb.NewError(&roachpb.OpRequiresTxnError{}), false
                }
                // If the batch contains more than just EndTransaction but ends
                // with it, we want the caller to come again with the
                // EndTransaction in an extra call.
                if l := len(ba.Requests) - 1; l > 0 && ba.Requests[l].GetInner().Method() == roachpb.EndTransaction {
                    return nil, roachpb.NewError(errors.New("cannot send 1PC txn to multiple ranges")), true /* shouldSplitET */
                }
            }
            // It's possible that the returned descriptor misses parts of the
            // ... (rest of the function omitted) ...