This article collects and summarizes typical usage examples of the Golang function github.com/cockroachdb/cockroach/util/log.V. If you have been wondering what exactly the V function does, how to call it, or what real-world uses of it look like, the curated examples here should help.
Below are 20 code examples of the V function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
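Before turning to the examples, here is a minimal sketch of the pattern most of them follow. It is not taken from any of the snippets below; the package name and the doWork function are invented for illustration. log.V(level) reports whether logging at the given verbosity level is enabled, so callers wrap comparatively expensive log statements in a guard and skip the work entirely when verbose logging is off.

package example

import "github.com/cockroachdb/cockroach/util/log"

// doWork is a hypothetical function showing the common log.V guard pattern.
func doWork(id int) {
	// log.V(2) returns true only when the process runs at verbosity level 2
	// or higher (set via the logger's verbosity flags), so the formatting
	// work below is skipped in the common case.
	if log.V(2) {
		log.Infof("processing item %d", id)
	}
}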
Example 1: processReplica
// processReplica processes a single replica. This should not be
// called externally to the queue. bq.mu.Lock should not be held
// while calling this method.
func (bq *baseQueue) processReplica(repl *Replica, clock *hlc.Clock) error {
	// Load the system config.
	cfg := bq.gossip.GetSystemConfig()
	if cfg == nil {
		bq.eventLog.Infof(log.V(1), "no system config available. skipping")
		return nil
	}

	desc := repl.Desc()
	if !bq.impl.acceptsUnsplitRanges() && cfg.NeedsSplit(desc.StartKey, desc.EndKey) {
		// Range needs to be split due to zone configs, but queue does
		// not accept unsplit ranges.
		bq.eventLog.Infof(log.V(3), "%s: split needed; skipping", repl)
		return nil
	}

	// If the queue requires a replica to have the range leader lease in
	// order to be processed, check whether this replica has leader lease
	// and renew or acquire if necessary.
	if bq.impl.needsLeaderLease() {
		// Create a "fake" get request in order to invoke redirectOnOrAcquireLease.
		if err := repl.redirectOnOrAcquireLeaderLease(tracing.NilSpan()); err != nil {
			bq.eventLog.Infof(log.V(3), "%s: could not acquire leader lease; skipping", repl)
			return nil
		}
	}

	bq.eventLog.Infof(log.V(3), "%s: processing", repl)
	start := time.Now()
	if err := bq.impl.process(clock.Now(), repl, cfg); err != nil {
		return err
	}
	bq.eventLog.Infof(log.V(2), "%s: done: %s", repl, time.Since(start))
	return nil
}
Author: JackKrupansky, Project: cockroach, Lines: 38, Source file: queue.go
Example 2: writeSummaries
// writeSummaries retrieves status summaries from the supplied
// NodeStatusRecorder and persists them to the cockroach data store.
func (s *Server) writeSummaries() error {
	nodeStatus, storeStatuses := s.recorder.GetStatusSummaries()
	if nodeStatus != nil {
		key := keys.NodeStatusKey(int32(nodeStatus.Desc.NodeID))
		if err := s.db.Put(key, nodeStatus); err != nil {
			return err
		}
		if log.V(1) {
			statusJSON, err := json.Marshal(nodeStatus)
			if err != nil {
				log.Errorf("error marshaling nodeStatus to json: %s", err)
			}
			log.Infof("node %d status: %s", nodeStatus.Desc.NodeID, statusJSON)
		}
	}

	for _, ss := range storeStatuses {
		key := keys.StoreStatusKey(int32(ss.Desc.StoreID))
		if err := s.db.Put(key, &ss); err != nil {
			return err
		}
		if log.V(1) {
			statusJSON, err := json.Marshal(&ss)
			if err != nil {
				log.Errorf("error marshaling storeStatus to json: %s", err)
			}
			log.Infof("store %d status: %s", ss.Desc.StoreID, statusJSON)
		}
	}
	return nil
}
Author: gechong, Project: cockroach, Lines: 33, Source file: server.go
Example 3: wrap
// wrap the supplied planNode with the sortNode if sorting is required.
func (n *sortNode) wrap(plan planNode) planNode {
	if n != nil {
		// Check to see if the requested ordering is compatible with the existing
		// ordering.
		existingOrdering := plan.Ordering()
		if log.V(2) {
			log.Infof("Sort: existing=%d desired=%d", existingOrdering, n.ordering)
		}
		match := computeOrderingMatch(n.ordering, existingOrdering, false)
		if match < len(n.ordering) {
			n.plan = plan
			n.needSort = true
			return n
		}

		if len(n.columns) < len(plan.Columns()) {
			// No sorting required, but we have to strip off the extra render
			// expressions we added.
			n.plan = plan
			return n
		}
	}

	if log.V(2) {
		log.Infof("Sort: no sorting required")
	}
	return plan
}
Author: ekkotron, Project: cockroach, Lines: 29, Source file: sort.go
Example 4: waitAndProcess
// waitAndProcess waits for the pace interval and processes the replica
// if repl is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a replica from queues when it
// is signaled via the removed channel.
func (rs *replicaScanner) waitAndProcess(start time.Time, clock *hlc.Clock, stopper *stop.Stopper,
	repl *Replica) bool {
	waitInterval := rs.paceInterval(start, timeutil.Now())
	rs.waitTimer.Reset(waitInterval)
	if log.V(6) {
		log.Infof("Wait time interval set to %s", waitInterval)
	}
	for {
		select {
		case <-rs.waitTimer.C:
			rs.waitTimer.Read = true
			if repl == nil {
				return false
			}
			return !stopper.RunTask(func() {
				// Try adding replica to all queues.
				for _, q := range rs.queues {
					q.MaybeAdd(repl, clock.Now())
				}
			})

		case repl := <-rs.removed:
			// Remove replica from all queues as applicable.
			for _, q := range rs.queues {
				q.MaybeRemove(repl)
			}
			if log.V(6) {
				log.Infof("removed replica %s", repl)
			}

		case <-stopper.ShouldStop():
			return true
		}
	}
}
Author: cuongdo, Project: cockroach, Lines: 38, Source file: scanner.go
Example 5: ShouldRebalance
// ShouldRebalance returns whether the specified store should attempt to
// rebalance a replica to another store.
func (a Allocator) ShouldRebalance(storeID roachpb.StoreID) bool {
	if !a.options.AllowRebalance {
		return false
	}
	// In production, add some random jitter to shouldRebalance.
	if !a.options.Deterministic && a.randGen.Float32() > rebalanceShouldRebalanceChance {
		return false
	}
	if log.V(2) {
		log.Infof("ShouldRebalance from store %d", storeID)
	}

	storeDesc := a.storePool.getStoreDescriptor(storeID)
	if storeDesc == nil {
		if log.V(2) {
			log.Warningf(
				"ShouldRebalance couldn't find store with id %d in StorePool",
				storeID)
		}
		return false
	}

	sl := a.storePool.getStoreList(*storeDesc.CombinedAttrs(), []roachpb.NodeID{storeDesc.Node.NodeID}, a.options.Deterministic)

	// ShouldRebalance is true if a suitable replacement can be found.
	return a.balancer.improve(storeDesc, sl) != nil
}
Author: nporsche, Project: cockroach, Lines: 28, Source file: allocator.go
Example 6: maybeGossipFirstRange
// maybeGossipFirstRange adds the sentinel and first range metadata to gossip
// if this is the first range and a leader lease can be obtained. The Store
// calls this periodically on first range replicas.
func (r *Replica) maybeGossipFirstRange() error {
	if !r.IsFirstRange() {
		return nil
	}

	ctx := r.context()

	// Gossip the cluster ID from all replicas of the first range.
	if log.V(1) {
		log.Infoc(ctx, "gossiping cluster id %s from store %d, range %d", r.rm.ClusterID(),
			r.rm.StoreID(), r.Desc().RangeID)
	}
	if err := r.rm.Gossip().AddInfo(gossip.KeyClusterID, r.rm.ClusterID(), clusterIDGossipTTL); err != nil {
		log.Errorc(ctx, "failed to gossip cluster ID: %s", err)
	}

	if ok, err := r.getLeaseForGossip(ctx); !ok || err != nil {
		return err
	}

	if log.V(1) {
		log.Infoc(ctx, "gossiping sentinel from store %d, range %d", r.rm.StoreID(), r.Desc().RangeID)
	}
	if err := r.rm.Gossip().AddInfo(gossip.KeySentinel, r.rm.ClusterID(), clusterIDGossipTTL); err != nil {
		log.Errorc(ctx, "failed to gossip cluster ID: %s", err)
	}
	if log.V(1) {
		log.Infoc(ctx, "gossiping first range from store %d, range %d", r.rm.StoreID(), r.Desc().RangeID)
	}
	if err := r.rm.Gossip().AddInfo(gossip.KeyFirstRangeDescriptor, *r.Desc(), configGossipTTL); err != nil {
		log.Errorc(ctx, "failed to gossip first range metadata: %s", err)
	}
	return nil
}
Author: donganwangshi, Project: cockroach, Lines: 36, Source file: replica.go
Example 7: handleWriteResponse
func (s *state) handleWriteResponse(response *writeResponse) {
	log.V(6).Infof("node %v got write response: %#v", s.nodeID, *response)
	for groupID, persistedGroup := range response.groups {
		g := s.groups[groupID]
		if persistedGroup.electionState != nil {
			g.persistedElectionState = persistedGroup.electionState
		}
		if persistedGroup.lastIndex != -1 {
			log.V(6).Infof("node %v: updating persisted log index to %v", s.nodeID,
				persistedGroup.lastIndex)
			s.broadcastEntries(g, persistedGroup.entries)
			g.persistedLastIndex = persistedGroup.lastIndex
			g.persistedLastTerm = persistedGroup.lastTerm
		}

		// If we are catching up, commit any newly-persisted entries that the leader
		// already considers committed.
		s.commitEntries(g, g.leaderCommitIndex)

		// Resolve any pending RPCs that have been waiting for persistence to catch up.
		var toDelete []*list.Element
		for e := g.pendingCalls.Front(); e != nil; e = e.Next() {
			call := e.Value.(*pendingCall)
			if !s.resolvePendingCall(g, call) {
				continue
			}
			call.call.Done <- call.call
			toDelete = append(toDelete, e)
		}
		for _, e := range toDelete {
			g.pendingCalls.Remove(e)
		}
		s.updateDirtyStatus(g)
	}
}
Author: bdotdub, Project: cockroach, Lines: 35, Source file: multiraft.go
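A side note on Example 7 (and Example 18 further down): these older snippets use the chained form log.V(6).Infof(...), while most of the other examples use the boolean guard form if log.V(n) { ... }. The sketch below is hypothetical: the logVerbose function is invented, and it assumes the glog-style API those older examples rely on, where the value returned by V can serve both as a condition and as a receiver for Infof. The practical difference is that the chained form still evaluates its argument expressions before the call (the logger merely drops the message), whereas with the guard form nothing inside the block runs when the level is disabled.

// logVerbose is a hypothetical illustration of the two invocation styles,
// assuming a glog-style logger as used by the older examples in this article.
func logVerbose(nodeID int, response interface{}) {
	// Chained form: nodeID and response are evaluated and passed even when
	// verbosity 6 is off; only the formatting and output are skipped.
	log.V(6).Infof("node %v got write response: %#v", nodeID, response)

	// Guard form: the body is not executed at all unless verbosity 6 is on.
	if log.V(6) {
		log.Infof("node %v got write response: %#v", nodeID, response)
	}
}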
Example 8: shouldQueue
func (rq replicateQueue) shouldQueue(now proto.Timestamp, repl *Replica) (shouldQ bool, priority float64) {
	// If the replica's range spans multiple zones, ignore it until the split
	// queue has processed it.
	if len(computeSplitKeys(rq.gossip, repl)) > 0 {
		return
	}

	// Load the zone config to find the desired replica attributes.
	zone, err := lookupZoneConfig(rq.gossip, repl)
	if err != nil {
		log.Error(err)
		return
	}

	delta := rq.replicaDelta(zone, repl, repl.Desc())
	if delta == 0 {
		if log.V(1) {
			log.Infof("%s has the correct number of nodes", repl)
		}
		return false, 0
	}
	if delta > 0 {
		if log.V(1) {
			log.Infof("%s needs to add %d nodes", repl, delta)
		}
		// For ranges which need additional replicas, increase the priority.
		return true, float64(delta + 10)
	}
	if log.V(1) {
		log.Infof("%s needs to remove %d nodes", repl, 0-delta)
	}
	// For ranges which have too many replicas, priority is absolute value of
	// the delta.
	return true, float64(0 - delta)
}
Author: kangxinrong, Project: cockroach, Lines: 35, Source file: replicate_queue.go
Example 9: waitAndProcess
// waitAndProcess waits for the pace interval and processes the range
// if rng is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a range from queues when it
// is signaled via the removed channel.
func (rs *rangeScanner) waitAndProcess(start time.Time, clock *hlc.Clock, stopper *util.Stopper,
	rng *Range) bool {
	waitInterval := rs.paceInterval(start, time.Now())
	nextTime := time.After(waitInterval)
	if log.V(6) {
		log.Infof("Wait time interval set to %s", waitInterval)
	}
	for {
		select {
		case <-nextTime:
			if rng == nil {
				return false
			}
			if !stopper.StartTask() {
				return true
			}
			// Try adding range to all queues.
			for _, q := range rs.queues {
				q.MaybeAdd(rng, clock.Now())
			}
			stopper.FinishTask()
			return false

		case rng := <-rs.removed:
			// Remove range from all queues as applicable.
			for _, q := range rs.queues {
				q.MaybeRemove(rng)
			}
			if log.V(6) {
				log.Infof("removed range %s", rng)
			}

		case <-stopper.ShouldStop():
			return true
		}
	}
}
Author: simonzhangsm, Project: cockroach, Lines: 39, Source file: scanner.go
Example 10: writeSummaries
// writeSummaries retrieves status summaries from the supplied
// NodeStatusRecorder and persists them to the cockroach data store.
func (s *Server) writeSummaries() (err error) {
	s.stopper.RunTask(func() {
		nodeStatus, storeStatuses := s.recorder.GetStatusSummaries()
		if nodeStatus != nil {
			key := keys.NodeStatusKey(int32(nodeStatus.Desc.NodeID))
			if err = s.db.Put(key, nodeStatus); err != nil {
				return
			}
			if log.V(1) {
				log.Infof("recorded status for node %d", nodeStatus.Desc.NodeID)
			}
		}

		for _, ss := range storeStatuses {
			key := keys.StoreStatusKey(int32(ss.Desc.StoreID))
			if err = s.db.Put(key, &ss); err != nil {
				return
			}
		}
		if log.V(1) {
			log.Infof("recorded status for %d stores", len(storeStatuses))
		}
	})
	return nil
}
Author: Gardenya, Project: cockroach, Lines: 27, Source file: server.go
Example 11: improve
// improve returns a candidate StoreDescriptor to rebalance a replica to. The
// strategy is to always converge on the mean range count. If that isn't
// possible, we don't return any candidate.
func (rcb rangeCountBalancer) improve(
	sl StoreList, excluded nodeIDSet,
) *roachpb.StoreDescriptor {
	// Attempt to select a better candidate from the supplied list.
	sl.stores = selectRandom(rcb.rand, allocatorRandomCount, sl, excluded)
	candidate := rcb.selectBest(sl)
	if candidate == nil {
		if log.V(2) {
			log.Infof(context.TODO(), "not rebalancing: no valid candidate targets: %s",
				formatCandidates(nil, sl.stores))
		}
		return nil
	}

	// Adding a replica to the candidate must make its range count converge on the
	// mean range count.
	if math.Abs(float64(candidate.Capacity.RangeCount+1)-sl.candidateCount.mean) >=
		math.Abs(float64(candidate.Capacity.RangeCount)-sl.candidateCount.mean) {
		if log.V(2) {
			log.Infof(context.TODO(), "not rebalancing: %s wouldn't converge on the mean %.1f",
				formatCandidates(candidate, sl.stores), sl.candidateCount.mean)
		}
		return nil
	}

	if log.V(2) {
		log.Infof(context.TODO(), "rebalancing: mean=%.1f %s",
			sl.candidateCount.mean, formatCandidates(candidate, sl.stores))
	}
	return candidate
}
Author: yangxuanjia, Project: cockroach, Lines: 34, Source file: balancer.go
Example 12: runHistoryWithRetry
// runHistoryWithRetry intercepts retry errors. If one is encountered,
// alternate histories are generated which all contain the exact
// history prefix which encountered the error, but which recombine the
// remaining commands with all of the commands from the retrying
// history.
//
// This process continues recursively if there are further retries.
func (hv *historyVerifier) runHistoryWithRetry(priorities []int32,
	isolations []enginepb.IsolationType, cmds []*cmd, db *client.DB, t *testing.T) error {
	if err := hv.runHistory(priorities, isolations, cmds, db, t); err != nil {
		if log.V(1) {
			log.Infof(context.Background(), "got an error running history %s: %s", historyString(cmds), err)
		}
		retry, ok := err.(*retryError)
		if !ok {
			return err
		}

		if _, hasRetried := hv.retriedTxns[retry.txnIdx]; hasRetried {
			if log.V(1) {
				log.Infof(context.Background(), "retried txn %d twice; skipping history", retry.txnIdx+1)
			}
			return nil
		}
		hv.retriedTxns[retry.txnIdx] = struct{}{}

		// Randomly subsample 5% of histories for reduced execution time.
		enumHis := sampleHistories(enumerateHistoriesAfterRetry(retry, cmds), 0.05)
		for i, h := range enumHis {
			if log.V(1) {
				log.Infof(context.Background(), "after retry, running alternate history %d of %d", i, len(enumHis))
			}
			if err := hv.runHistoryWithRetry(priorities, isolations, h, db, t); err != nil {
				return err
			}
		}
	}
	return nil
}
Author: yaojingguo, Project: cockroach, Lines: 39, Source file: txn_correctness_test.go
Example 13: RefreshLeases
// RefreshLeases starts a goroutine that refreshes the lease manager
// leases for tables received in the latest system configuration via gossip.
func (m *LeaseManager) RefreshLeases(s *stop.Stopper, db *client.DB, gossip *gossip.Gossip) {
	s.RunWorker(func() {
		descKeyPrefix := keys.MakeTablePrefix(uint32(sqlbase.DescriptorTable.ID))
		gossipUpdateC := gossip.RegisterSystemConfigChannel()
		for {
			select {
			case <-gossipUpdateC:
				cfg, _ := gossip.GetSystemConfig()
				if m.testingKnobs.GossipUpdateEvent != nil {
					m.testingKnobs.GossipUpdateEvent(cfg)
				}
				// Read all tables and their versions.
				if log.V(2) {
					log.Info("received a new config; will refresh leases")
				}

				// Loop through the configuration to find all the tables.
				for _, kv := range cfg.Values {
					if !bytes.HasPrefix(kv.Key, descKeyPrefix) {
						continue
					}
					// Attempt to unmarshal config into a table/database descriptor.
					var descriptor sqlbase.Descriptor
					if err := kv.Value.GetProto(&descriptor); err != nil {
						log.Warningf("%s: unable to unmarshal descriptor %v", kv.Key, kv.Value)
						continue
					}
					switch union := descriptor.Union.(type) {
					case *sqlbase.Descriptor_Table:
						table := union.Table
						if err := table.Validate(); err != nil {
							log.Errorf("%s: received invalid table descriptor: %v", kv.Key, table)
							continue
						}
						if log.V(2) {
							log.Infof("%s: refreshing lease table: %d (%s), version: %d",
								kv.Key, table.ID, table.Name, table.Version)
						}
						// Try to refresh the table lease to one >= this version.
						if t := m.findTableState(table.ID, false /* create */, nil); t != nil {
							if err := t.purgeOldLeases(
								db, table.Deleted(), table.Version, m.LeaseStore); err != nil {
								log.Warningf("error purging leases for table %d(%s): %s",
									table.ID, table.Name, err)
							}
						}
					case *sqlbase.Descriptor_Database:
						// Ignore.
					}
				}
				if m.testingKnobs.TestingLeasesRefreshedEvent != nil {
					m.testingKnobs.TestingLeasesRefreshedEvent(cfg)
				}

			case <-s.ShouldStop():
				return
			}
		}
	})
}
Author: JKhawaja, Project: cockroach, Lines: 62, Source file: lease.go
Example 14: waitForInstanceGroupOperation
// Repeatedly poll the given operation until its status is DONE, then return its Error.
// We determine whether it's a zone or global operation by parsing its resource link.
// TODO(marc): give up after a while.
func (g *Google) waitForInstanceGroupOperation(op *resourceviews.Operation) error {
	// Early out for finished ops.
	if op.Status == "DONE" {
		if log.V(1) {
			log.Infof("Operation %s %s: DONE, err=%v", op.OperationType, op.TargetLink,
				errorFromInstanceGroupOperationError(op.Error))
		}
		return errorFromInstanceGroupOperationError(op.Error)
	}

	for {
		liveOp, err := g.instanceGroupsService.ZoneOperations.Get(g.project, g.zone, op.Name).Do()
		// This usually indicates a bad operation object.
		if err != nil {
			return util.Errorf("could not lookup operation %+v: %s", op, err)
		}
		if log.V(1) {
			log.Infof("Operation %s %s: %s, err=%v", liveOp.OperationType, liveOp.TargetLink,
				liveOp.Status, errorFromInstanceGroupOperationError(liveOp.Error))
		}
		if liveOp.Status == "DONE" {
			return errorFromInstanceGroupOperationError(liveOp.Error)
		}
		time.Sleep(time.Second)
	}
}
Author: bdarnell, Project: cockroach-prod, Lines: 29, Source file: instance_groups.go
Example 15: clearOverlappingCachedRangeDescriptors
// clearOverlappingCachedRangeDescriptors looks up and clears any
// cache entries which overlap the specified key or descriptor.
func (rdc *rangeDescriptorCache) clearOverlappingCachedRangeDescriptors(key, metaKey proto.Key, desc *proto.RangeDescriptor) {
	if desc.StartKey.Equal(desc.EndKey) { // True for some unittests.
		return
	}

	// Clear out any descriptors which subsume the key which we're going
	// to cache. For example, if an existing KeyMin->KeyMax descriptor
	// should be cleared out in favor of a KeyMin->"m" descriptor.
	k, v, ok := rdc.rangeCache.Ceil(rangeCacheKey(metaKey))
	if ok {
		descriptor := v.(*proto.RangeDescriptor)
		addrKey := keys.KeyAddress(key)
		if !addrKey.Less(descriptor.StartKey) && !descriptor.EndKey.Less(addrKey) {
			if log.V(1) {
				log.Infof("clearing overlapping descriptor: key=%s desc=%s", k, descriptor)
			}
			rdc.rangeCache.Del(k.(rangeCacheKey))
		}
	}

	// Also clear any descriptors which are subsumed by the one we're
	// going to cache. This could happen on a merge (and also happens
	// when there's a lot of concurrency). Iterate from StartKey.Next().
	rdc.rangeCache.DoRange(func(k, v interface{}) {
		if log.V(1) {
			log.Infof("clearing subsumed descriptor: key=%s desc=%s", k, v.(*proto.RangeDescriptor))
		}
		rdc.rangeCache.Del(k.(rangeCacheKey))
	}, rangeCacheKey(keys.RangeMetaKey(desc.StartKey.Next())),
		rangeCacheKey(keys.RangeMetaKey(desc.EndKey)))
}
Author: zhengchen1208, Project: cockroach, Lines: 31, Source file: range_cache.go
Example 16: waitAndProcess
// waitAndProcess waits for the pace interval and processes the replica
// if repl is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a replica from queues when it
// is signaled via the removed channel.
func (rs *replicaScanner) waitAndProcess(
	start time.Time, clock *hlc.Clock, stopper *stop.Stopper, repl *Replica,
) bool {
	waitInterval := rs.paceInterval(start, timeutil.Now())
	rs.waitTimer.Reset(waitInterval)
	if log.V(6) {
		log.Infof(context.TODO(), "wait timer interval set to %s", waitInterval)
	}
	for {
		select {
		case <-rs.waitTimer.C:
			if log.V(6) {
				log.Infof(context.TODO(), "wait timer fired")
			}
			rs.waitTimer.Read = true
			if repl == nil {
				return false
			}
			return nil != stopper.RunTask(func() {
				// Try adding replica to all queues.
				for _, q := range rs.queues {
					q.MaybeAdd(repl, clock.Now())
				}
			})

		case repl := <-rs.removed:
			rs.removeReplica(repl)

		case <-stopper.ShouldStop():
			return true
		}
	}
}
Author: yaojingguo, Project: cockroach, Lines: 38, Source file: scanner.go
Example 17: MaybeAdd
// MaybeAdd adds the specified replica if bq.shouldQueue specifies it
// should be queued. Replicas are added to the queue using the priority
// returned by bq.shouldQueue. If the queue is too full, the replica may
// not be added, as the replica with the lowest priority will be
// dropped.
func (bq *baseQueue) MaybeAdd(repl *Replica, now roachpb.Timestamp) {
	// Load the system config.
	cfg := bq.gossip.GetSystemConfig()
	if cfg == nil {
		log.Infof("no system config available. skipping...")
		return
	}

	desc := repl.Desc()
	if !bq.impl.acceptsUnsplitRanges() && cfg.NeedsSplit(desc.StartKey, desc.EndKey) {
		// Range needs to be split due to zone configs, but queue does
		// not accept unsplit ranges.
		if log.V(3) {
			log.Infof("range %s needs to be split; not adding", repl)
		}
		return
	}

	bq.Lock()
	defer bq.Unlock()
	should, priority := bq.impl.shouldQueue(now, repl, cfg)
	if err := bq.addInternal(repl, should, priority); err != nil && log.V(3) {
		log.Infof("couldn't add %s to queue %s: %s", repl, bq.name, err)
	}
}
Author: rohanahata, Project: cockroach, Lines: 30, Source file: queue.go
Example 18: commitEntries
func (s *state) commitEntries(g *group, leaderCommitIndex int) {
	if leaderCommitIndex == g.commitIndex {
		return
	} else if leaderCommitIndex < g.commitIndex {
		// Commit index cannot actually move backwards, but a newly-elected leader might
		// report stale positions for a short time so just ignore them.
		log.V(6).Infof("node %v: ignoring commit index %v because it is behind existing commit %v",
			s.nodeID, leaderCommitIndex, g.commitIndex)
		return
	}
	g.leaderCommitIndex = leaderCommitIndex
	index := leaderCommitIndex
	if index > g.persistedLastIndex {
		// If we are not caught up with the leader, just commit as far as we can.
		// We'll continue to commit new entries as we receive AppendEntriesRequests.
		log.V(6).Infof("node %v: leader is committed to %v, but capping to %v",
			s.nodeID, index, g.persistedLastIndex)
		index = g.persistedLastIndex
	}
	log.V(6).Infof("node %v advancing commit position for group %v from %v to %v",
		s.nodeID, g.groupID, g.commitIndex, index)

	// TODO(bdarnell): move storage access (incl. the channel iteration) to a goroutine
	entries := make(chan *LogEntryState, 100)
	go s.Storage.GetLogEntries(g.groupID, g.commitIndex+1, index, entries)
	for entry := range entries {
		log.V(6).Infof("node %v: committing %+v", s.nodeID, entry)
		if entry.Entry.Type == LogEntryCommand {
			s.sendEvent(&EventCommandCommitted{entry.Entry.Payload})
		}
	}
	g.commitIndex = index
	s.broadcastEntries(g, nil)
}
Author: kuguobing, Project: cockroach, Lines: 33, Source file: multiraft.go
Example 19: GetJSON
// GetJSON retrieves the URL specified by https://Addr(<port>)<path>
// and unmarshals the result as JSON.
func (c *Container) GetJSON(port, path string, v interface{}) error {
	client := &http.Client{
		Timeout: 200 * time.Millisecond,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		}}
	resp, err := client.Get(fmt.Sprintf("https://%s%s", c.Addr(port), path))
	if err != nil {
		if log.V(1) {
			log.Info(err)
		}
		return err
	}
	defer resp.Body.Close()

	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		if log.V(1) {
			log.Info(err)
		}
		return err
	}

	if err := json.Unmarshal(b, v); err != nil {
		if log.V(1) {
			log.Info(err)
		}
	}
	return nil
}
Author: nkhuyu, Project: cockroach, Lines: 32, Source file: docker.go
Example 20: AddMetricStruct
// AddMetricStruct examines all fields of metricStruct and adds
// all Iterable or metricGroup objects to the registry.
func (r *Registry) AddMetricStruct(metricStruct interface{}) {
	v := reflect.ValueOf(metricStruct)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	t := v.Type()

	for i := 0; i < v.NumField(); i++ {
		vfield, tfield := v.Field(i), t.Field(i)
		if !vfield.CanInterface() {
			if log.V(2) {
				log.Infof(context.TODO(), "Skipping unexported field %s", tfield.Name)
			}
			continue
		}
		val := vfield.Interface()
		switch typ := val.(type) {
		case metricGroup:
			r.AddMetricGroup(typ)
		case Iterable:
			r.AddMetric(typ)
		default:
			if log.V(2) {
				log.Infof(context.TODO(), "Skipping non-metric field %s", tfield.Name)
			}
		}
	}
}
Author: yaojingguo, Project: cockroach, Lines: 30, Source file: registry.go
Note: the github.com/cockroachdb/cockroach/util/log.V examples in this article were collected from source-code and documentation hosting platforms such as GitHub/MSDocs. The snippets are drawn from open source projects contributed by their authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not republish without permission.