
Golang log.Infof Function Code Examples


This article collects typical usage examples of the Infof function from the Go package github.com/cockroachdb/cockroach/pkg/util/log. If you are unsure what log.Infof does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The following 20 code examples of Infof are shown, ordered by popularity.
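
Before the examples, here is a minimal sketch of the call pattern they all share: log.Infof takes a context.Context, then a printf-style format string and its arguments, and chattier messages are usually gated behind a verbosity check with log.V. This sketch assumes github.com/cockroachdb/cockroach/pkg/util/log is importable from your module; the doWork function and its messages are hypothetical and are not taken from the CockroachDB code base.

package main

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// doWork is a hypothetical function showing the two patterns that recur in
// the examples below: an unconditional Infof call, and an Infof call gated
// behind a verbosity check with log.V.
func doWork(ctx context.Context, items int) {
	// Info-level logging with printf-style formatting; tags attached to the
	// context (node ID, range ID, ...) are prepended to the message.
	log.Infof(ctx, "processing %d items", items)

	// Verbose logging: the body only runs when the verbosity level is at least 2.
	if log.V(2) {
		log.Infof(ctx, "verbose detail: items=%d", items)
	}
}

func main() {
	doWork(context.Background(), 42)
}

The same two patterns appear throughout the 20 examples: unconditional Infof calls for noteworthy events, and log.V-gated calls for detailed diagnostic output.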

Example 1: postFreeze

func postFreeze(
	c cluster.Cluster, freeze bool, timeout time.Duration,
) (serverpb.ClusterFreezeResponse, error) {
	httpClient := cluster.HTTPClient
	httpClient.Timeout = timeout

	var resp serverpb.ClusterFreezeResponse
	log.Infof(context.Background(), "requesting: freeze=%t, timeout=%s", freeze, timeout)
	cb := func(v proto.Message) {
		oldNum := resp.RangesAffected
		resp = *v.(*serverpb.ClusterFreezeResponse)
		if oldNum > resp.RangesAffected {
			resp.RangesAffected = oldNum
		}
		if (resp != serverpb.ClusterFreezeResponse{}) {
			log.Infof(context.Background(), "%+v", &resp)
		}
	}
	err := httputil.StreamJSON(
		httpClient,
		c.URL(0)+"/_admin/v1/cluster/freeze",
		&serverpb.ClusterFreezeRequest{Freeze: freeze},
		&serverpb.ClusterFreezeResponse{},
		cb,
	)
	return resp, err
}
Author: knz, Project: cockroach, Lines: 27, Source: freeze_test.go


Example 2: testGossipPeeringsInner

func testGossipPeeringsInner(
	ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
	num := c.NumNodes()

	deadline := timeutil.Now().Add(cfg.Duration)

	waitTime := longWaitTime
	if cfg.Duration < waitTime {
		waitTime = shortWaitTime
	}

	for timeutil.Now().Before(deadline) {
		CheckGossip(ctx, t, c, waitTime, HasPeers(num))

		// Restart the first node.
		log.Infof(ctx, "restarting node 0")
		if err := c.Restart(ctx, 0); err != nil {
			t.Fatal(err)
		}
		CheckGossip(ctx, t, c, waitTime, HasPeers(num))

		// Restart another node (if there is one).
		var pickedNode int
		if num > 1 {
			pickedNode = rand.Intn(num-1) + 1
		}
		log.Infof(ctx, "restarting node %d", pickedNode)
		if err := c.Restart(ctx, pickedNode); err != nil {
			t.Fatal(err)
		}
		CheckGossip(ctx, t, c, waitTime, HasPeers(num))
	}
}
Author: BramGruneir, Project: cockroach, Lines: 34, Source: gossip_peerings_test.go


Example 3: deleteRow

// deleteRow adds to the batch the kv operations necessary to delete a table row
// with the given values.
func (rd *rowDeleter) deleteRow(ctx context.Context, b *client.Batch, values []parser.Datum) error {
	if err := rd.fks.checkAll(values); err != nil {
		return err
	}

	primaryIndexKey, secondaryIndexEntries, err := rd.helper.encodeIndexes(rd.fetchColIDtoRowIndex, values)
	if err != nil {
		return err
	}

	for _, secondaryIndexEntry := range secondaryIndexEntries {
		if log.V(2) {
			log.Infof(ctx, "Del %s", secondaryIndexEntry.Key)
		}
		b.Del(secondaryIndexEntry.Key)
	}

	// Delete the row.
	rd.startKey = roachpb.Key(primaryIndexKey)
	rd.endKey = roachpb.Key(encoding.EncodeNotNullDescending(primaryIndexKey))
	if log.V(2) {
		log.Infof(ctx, "DelRange %s - %s", rd.startKey, rd.endKey)
	}
	b.DelRange(&rd.startKey, &rd.endKey, false)
	rd.startKey, rd.endKey = nil, nil

	return nil
}
Author: BramGruneir, Project: cockroach, Lines: 30, Source: rowwriter.go


Example 4: main

func main() {
	flag.Parse()

	c := localcluster.New(*numNodes)
	defer c.Close()

	log.SetExitFunc(func(code int) {
		c.Close()
		os.Exit(code)
	})

	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	a := newAllocSim(c)

	go func() {
		var exitStatus int
		select {
		case s := <-signalCh:
			log.Infof(context.Background(), "signal received: %v", s)
			exitStatus = 1
		case <-time.After(*duration):
			log.Infof(context.Background(), "finished run of: %s", *duration)
		}
		a.finalStatus()
		c.Close()
		os.Exit(exitStatus)
	}()

	c.Start("allocsim", *workers, flag.Args(), []string{})
	c.UpdateZoneConfig(1, 1<<20)
	a.run(*workers)
}
Author: BramGruneir, Project: cockroach, Lines: 33, Source: main.go


Example 5: improve

// improve returns a candidate StoreDescriptor to rebalance a replica to. The
// strategy is to always converge on the mean range count. If that isn't
// possible, we don't return any candidate.
func (rcb rangeCountBalancer) improve(sl StoreList, excluded nodeIDSet) *roachpb.StoreDescriptor {
	// Attempt to select a better candidate from the supplied list.
	sl.stores = selectRandom(rcb.rand, allocatorRandomCount, sl, excluded)
	candidate := rcb.selectBest(sl)
	if candidate == nil {
		if log.V(2) {
			log.Infof(context.TODO(), "not rebalancing: no valid candidate targets: %s",
				formatCandidates(nil, sl.stores))
		}
		return nil
	}

	// Adding a replica to the candidate must make its range count converge on the
	// mean range count.
	rebalanceConvergesOnMean := rebalanceToConvergesOnMean(sl, *candidate)
	if !rebalanceConvergesOnMean {
		if log.V(2) {
			log.Infof(context.TODO(), "not rebalancing: %s wouldn't converge on the mean %.1f",
				formatCandidates(candidate, sl.stores), sl.candidateCount.mean)
		}
		return nil
	}

	if log.V(2) {
		log.Infof(context.TODO(), "rebalancing: mean=%.1f %s",
			sl.candidateCount.mean, formatCandidates(candidate, sl.stores))
	}
	return candidate
}
Author: hvaara, Project: cockroach, Lines: 32, Source: balancer.go


Example 6: waitAndProcess

// waitAndProcess waits for the pace interval and processes the replica
// if repl is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a replica from queues when it
// is signaled via the removed channel.
func (rs *replicaScanner) waitAndProcess(
	ctx context.Context, start time.Time, clock *hlc.Clock, stopper *stop.Stopper, repl *Replica,
) bool {
	waitInterval := rs.paceInterval(start, timeutil.Now())
	rs.waitTimer.Reset(waitInterval)
	if log.V(6) {
		log.Infof(ctx, "wait timer interval set to %s", waitInterval)
	}
	for {
		select {
		case <-rs.waitTimer.C:
			if log.V(6) {
				log.Infof(ctx, "wait timer fired")
			}
			rs.waitTimer.Read = true
			if repl == nil {
				return false
			}

			if log.V(2) {
				log.Infof(ctx, "replica scanner processing %s", repl)
			}
			for _, q := range rs.queues {
				q.MaybeAdd(repl, clock.Now())
			}
			return false

		case repl := <-rs.removed:
			rs.removeReplica(repl)

		case <-stopper.ShouldStop():
			return true
		}
	}
}
Author: knz, Project: cockroach, Lines: 39, Source: scanner.go


Example 7: process

// process synchronously invokes admin split for each proposed split key.
func (sq *splitQueue) process(
	ctx context.Context, now hlc.Timestamp, r *Replica, sysCfg config.SystemConfig,
) error {
	// First handle case of splitting due to zone config maps.
	desc := r.Desc()
	splitKeys := sysCfg.ComputeSplitKeys(desc.StartKey, desc.EndKey)
	if len(splitKeys) > 0 {
		log.Infof(ctx, "splitting at keys %v", splitKeys)
		for _, splitKey := range splitKeys {
			if err := sq.db.AdminSplit(ctx, splitKey.AsRawKey()); err != nil {
				return errors.Errorf("unable to split %s at key %q: %s", r, splitKey, err)
			}
		}
		return nil
	}

	// Next handle case of splitting due to size.
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		return err
	}
	size := r.GetMVCCStats().Total()
	// FIXME: why is this implementation not the same as the one above?
	if float64(size)/float64(zone.RangeMaxBytes) > 1 {
		log.Infof(ctx, "splitting size=%d max=%d", size, zone.RangeMaxBytes)
		if _, pErr := client.SendWrappedWith(ctx, r, roachpb.Header{
			Timestamp: now,
		}, &roachpb.AdminSplitRequest{
			Span: roachpb.Span{Key: desc.StartKey.AsRawKey()},
		}); pErr != nil {
			return pErr.GoError()
		}
	}
	return nil
}
Author: knz, Project: cockroach, Lines: 36, Source: split_queue.go


Example 8: createDescriptorWithID

func (p *planner) createDescriptorWithID(
	idKey roachpb.Key, id sqlbase.ID, descriptor sqlbase.DescriptorProto,
) error {
	descriptor.SetID(id)
	// TODO(pmattis): The error currently returned below is likely going to be
	// difficult to interpret.
	//
	// TODO(pmattis): Need to handle if-not-exists here as well.
	//
	// TODO(pmattis): This is writing the namespace and descriptor table entries,
	// but not going through the normal INSERT logic and not performing a precise
	// mimicry. In particular, we're only writing a single key per table, while
	// perfect mimicry would involve writing a sentinel key for each row as well.
	descKey := sqlbase.MakeDescMetadataKey(descriptor.GetID())

	b := &client.Batch{}
	descID := descriptor.GetID()
	descDesc := sqlbase.WrapDescriptor(descriptor)
	if log.V(2) {
		log.Infof(p.ctx(), "CPut %s -> %d", idKey, descID)
		log.Infof(p.ctx(), "CPut %s -> %s", descKey, descDesc)
	}
	b.CPut(idKey, descID, nil)
	b.CPut(descKey, descDesc, nil)

	p.setTestingVerifyMetadata(func(systemConfig config.SystemConfig) error {
		if err := expectDescriptorID(systemConfig, idKey, descID); err != nil {
			return err
		}
		return expectDescriptor(systemConfig, descKey, descDesc)
	})

	return p.txn.Run(b)
}
Author: BramGruneir, Project: cockroach, Lines: 34, Source: descriptor.go


Example 9: flush

// flush sends the rows accumulated so far in a StreamMessage.
func (m *outbox) flush(last bool, err error) error {
	if !last && m.numRows == 0 {
		return nil
	}
	msg := m.encoder.FormMessage(last, err)

	if log.V(3) {
		log.Infof(m.flowCtx.Context, "flushing outbox")
	}
	var sendErr error
	if m.stream != nil {
		sendErr = m.stream.Send(msg)
	} else {
		sendErr = m.syncFlowStream.Send(msg)
	}
	if sendErr != nil {
		if log.V(1) {
			log.Errorf(m.flowCtx.Context, "outbox flush error: %s", sendErr)
		}
	} else if log.V(3) {
		log.Infof(m.flowCtx.Context, "outbox flushed")
	}
	if sendErr != nil {
		return sendErr
	}

	m.numRows = 0
	return nil
}
Author: hvaara, Project: cockroach, Lines: 30, Source: outbox.go


Example 10: pullImage

func pullImage(
	ctx context.Context, l *LocalCluster, ref string, options types.ImagePullOptions,
) error {
	// HACK: on CircleCI, docker pulls the image on the first access from an
	// acceptance test even though that image is already present. So we first
	// check to see if our image is present in order to avoid this slowness.
	if hasImage(ctx, l, ref) {
		log.Infof(ctx, "ImagePull %s already exists", ref)
		return nil
	}

	log.Infof(ctx, "ImagePull %s starting", ref)
	defer log.Infof(ctx, "ImagePull %s complete", ref)

	rc, err := l.client.ImagePull(ctx, ref, options)
	if err != nil {
		return err
	}
	defer rc.Close()
	out := os.Stderr
	outFd := out.Fd()
	isTerminal := isatty.IsTerminal(outFd)

	return jsonmessage.DisplayJSONMessagesStream(rc, out, outFd, isTerminal, nil)
}
Author: BramGruneir, Project: cockroach, Lines: 25, Source: docker.go


Example 11: initNodeID

// initNodeID updates the internal NodeDescriptor with the given ID. If zero is
// supplied, a new NodeID is allocated with the first invocation. For all other
// values, the supplied ID is stored into the descriptor (unless one has been
// set previously, in which case a fatal error occurs).
//
// Upon setting a new NodeID, the descriptor is gossiped and the NodeID is
// stored into the gossip instance.
func (n *Node) initNodeID(id roachpb.NodeID) {
	ctx := n.AnnotateCtx(context.TODO())
	if id < 0 {
		log.Fatalf(ctx, "NodeID must not be negative")
	}

	if o := n.Descriptor.NodeID; o > 0 {
		if id == 0 {
			return
		}
		log.Fatalf(ctx, "cannot initialize NodeID to %d, already have %d", id, o)
	}
	var err error
	if id == 0 {
		ctxWithSpan, span := n.AnnotateCtxWithSpan(ctx, "alloc-node-id")
		id, err = allocateNodeID(ctxWithSpan, n.storeCfg.DB)
		if err != nil {
			log.Fatal(ctxWithSpan, err)
		}
		log.Infof(ctxWithSpan, "new node allocated ID %d", id)
		if id == 0 {
			log.Fatal(ctxWithSpan, "new node allocated illegal ID 0")
		}
		span.Finish()
		n.storeCfg.Gossip.NodeID.Set(ctx, id)
	} else {
		log.Infof(ctx, "node ID %d initialized", id)
	}
	// Gossip the node descriptor to make this node addressable by node ID.
	n.Descriptor.NodeID = id
	if err = n.storeCfg.Gossip.SetNodeDescriptor(&n.Descriptor); err != nil {
		log.Fatalf(ctx, "couldn't gossip descriptor for node %d: %s", n.Descriptor.NodeID, err)
	}
}
Author: knz, Project: cockroach, Lines: 41, Source: node.go


Example 12: runHistoryWithRetry

// runHistoryWithRetry intercepts retry errors. If one is encountered,
// alternate histories are generated which all contain the exact
// history prefix which encountered the error, but which recombine the
// remaining commands with all of the commands from the retrying
// history.
//
// This process continues recursively if there are further retries.
func (hv *historyVerifier) runHistoryWithRetry(
	priorities []int32, isolations []enginepb.IsolationType, cmds []*cmd, db *client.DB, t *testing.T,
) error {
	if err := hv.runHistory(priorities, isolations, cmds, db, t); err != nil {
		if log.V(1) {
			log.Infof(context.Background(), "got an error running history %s: %s", historyString(cmds), err)
		}
		retry, ok := err.(*retryError)
		if !ok {
			return err
		}

		if _, hasRetried := hv.retriedTxns[retry.txnIdx]; hasRetried {
			if log.V(1) {
				log.Infof(context.Background(), "retried txn %d twice; skipping history", retry.txnIdx+1)
			}
			return nil
		}
		hv.retriedTxns[retry.txnIdx] = struct{}{}

		// Randomly subsample 5% of histories for reduced execution time.
		enumHis := sampleHistories(enumerateHistoriesAfterRetry(retry, cmds), 0.05)
		for i, h := range enumHis {
			if log.V(1) {
				log.Infof(context.Background(), "after retry, running alternate history %d of %d", i, len(enumHis))
			}
			if err := hv.runHistoryWithRetry(priorities, isolations, h, db, t); err != nil {
				return err
			}
		}
	}
	return nil
}
Author: knz, Project: cockroach, Lines: 40, Source: txn_correctness_test.go


Example 13: AddMetricStruct

// AddMetricStruct examines all fields of metricStruct and adds
// all Iterable or metricGroup objects to the registry.
func (r *Registry) AddMetricStruct(metricStruct interface{}) {
	v := reflect.ValueOf(metricStruct)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	t := v.Type()

	for i := 0; i < v.NumField(); i++ {
		vfield, tfield := v.Field(i), t.Field(i)
		if !vfield.CanInterface() {
			if log.V(2) {
				log.Infof(context.TODO(), "Skipping unexported field %s", tfield.Name)
			}
			continue
		}
		val := vfield.Interface()
		switch typ := val.(type) {
		case Iterable:
			r.AddMetric(typ)
		case Struct:
			r.AddMetricStruct(typ)
		default:
			if log.V(2) {
				log.Infof(context.TODO(), "Skipping non-metric field %s", tfield.Name)
			}
		}
	}
}
Author: knz, Project: cockroach, Lines: 30, Source: registry.go


Example 14: sendGossip

// sendGossip sends the latest gossip to the remote server, based on
// the remote server's notion of other nodes' high water timestamps.
func (c *client) sendGossip(g *Gossip, stream Gossip_GossipClient) error {
	g.mu.Lock()
	if delta := g.mu.is.delta(c.remoteHighWaterStamps); len(delta) > 0 {
		args := Request{
			NodeID:          g.NodeID.Get(),
			Addr:            g.mu.is.NodeAddr,
			Delta:           delta,
			HighWaterStamps: g.mu.is.getHighWaterStamps(),
		}

		bytesSent := int64(args.Size())
		infosSent := int64(len(delta))
		c.clientMetrics.BytesSent.Inc(bytesSent)
		c.clientMetrics.InfosSent.Inc(infosSent)
		c.nodeMetrics.BytesSent.Inc(bytesSent)
		c.nodeMetrics.InfosSent.Inc(infosSent)

		if log.V(1) {
			ctx := c.AnnotateCtx(stream.Context())
			if c.peerID != 0 {
				log.Infof(ctx, "sending %s to node %d (%s)", extractKeys(args.Delta), c.peerID, c.addr)
			} else {
				log.Infof(ctx, "sending %s to %s", extractKeys(args.Delta), c.addr)
			}
		}

		g.mu.Unlock()
		return stream.Send(&args)
	}
	g.mu.Unlock()
	return nil
}
Author: knz, Project: cockroach, Lines: 34, Source: client.go


Example 15: wrap

// wrap the supplied planNode with the sortNode if sorting is required.
// The first returned value is "true" if the sort node can be squashed
// in the selectTopNode (sorting unneeded).
func (n *sortNode) wrap(plan planNode) (bool, planNode) {
	if n != nil {
		// Check to see if the requested ordering is compatible with the existing
		// ordering.
		existingOrdering := plan.Ordering()
		if log.V(2) {
			log.Infof(n.ctx, "Sort: existing=%+v desired=%+v", existingOrdering, n.ordering)
		}
		match := computeOrderingMatch(n.ordering, existingOrdering, false)
		if match < len(n.ordering) {
			n.plan = plan
			n.needSort = true
			return false, n
		}

		if len(n.columns) < len(plan.Columns()) {
			// No sorting required, but we have to strip off the extra render
			// expressions we added.
			n.plan = plan
			return false, n
		}

		if log.V(2) {
			log.Infof(n.ctx, "Sort: no sorting required")
		}
	}

	return true, plan
}
Author: veteranlu, Project: cockroach, Lines: 32, Source: sort.go


Example 16: throttle

// throttle informs the store pool that the given remote store declined a
// snapshot or failed to apply one, ensuring that it will not be considered
// for up-replication or rebalancing until after the configured timeout period
// has elapsed. Declined being true indicates that the remote store explicitly
// declined a snapshot.
func (sp *StorePool) throttle(reason throttleReason, toStoreID roachpb.StoreID) {
	sp.mu.Lock()
	defer sp.mu.Unlock()
	detail := sp.getStoreDetailLocked(toStoreID)
	ctx := sp.AnnotateCtx(context.TODO())

	// If a snapshot is declined, be it due to an error or because it was
	// rejected, we mark the store detail as having been declined so it won't
	// be considered as a candidate for new replicas until after the configured
	// timeout period has passed.
	switch reason {
	case throttleDeclined:
		detail.throttledUntil = sp.clock.Now().GoTime().Add(sp.declinedReservationsTimeout)
		if log.V(2) {
			log.Infof(ctx, "snapshot declined, store:%s will be throttled for %s until %s",
				toStoreID, sp.declinedReservationsTimeout, detail.throttledUntil)
		}
	case throttleFailed:
		detail.throttledUntil = sp.clock.Now().GoTime().Add(sp.failedReservationsTimeout)
		if log.V(2) {
			log.Infof(ctx, "snapshot failed, store:%s will be throttled for %s until %s",
				toStoreID, sp.failedReservationsTimeout, detail.throttledUntil)
		}
	}
}
Author: nvanbenschoten, Project: cockroach, Lines: 30, Source: store_pool.go


Example 17: Run

func (at *allocatorTest) Run(ctx context.Context, t *testing.T) {
	at.f = MakeFarmer(t, at.Prefix, stopper)

	if at.CockroachDiskSizeGB != 0 {
		at.f.AddVars["cockroach_disk_size"] = strconv.Itoa(at.CockroachDiskSizeGB)
	}

	log.Infof(ctx, "creating cluster with %d node(s)", at.StartNodes)
	if err := at.f.Resize(at.StartNodes); err != nil {
		t.Fatal(err)
	}
	CheckGossip(ctx, t, at.f, longWaitTime, HasPeers(at.StartNodes))
	at.f.Assert(ctx, t)
	log.Info(ctx, "initial cluster is up")

	// We must stop the cluster because a) `nodectl` pokes at the data directory
	// and, more importantly, b) we don't want the cluster above and the cluster
	// below to ever talk to each other (see #7224).
	log.Info(ctx, "stopping cluster")
	for i := 0; i < at.f.NumNodes(); i++ {
		if err := at.f.Kill(ctx, i); err != nil {
			t.Fatalf("error stopping node %d: %s", i, err)
		}
	}

	log.Info(ctx, "downloading archived stores from Google Cloud Storage in parallel")
	errors := make(chan error, at.f.NumNodes())
	for i := 0; i < at.f.NumNodes(); i++ {
		go func(nodeNum int) {
			errors <- at.f.Exec(nodeNum, "./nodectl download "+at.StoreURL)
		}(i)
	}
	for i := 0; i < at.f.NumNodes(); i++ {
		if err := <-errors; err != nil {
			t.Fatalf("error downloading store %d: %s", i, err)
		}
	}

	log.Info(ctx, "restarting cluster with archived store(s)")
	for i := 0; i < at.f.NumNodes(); i++ {
		if err := at.f.Restart(ctx, i); err != nil {
			t.Fatalf("error restarting node %d: %s", i, err)
		}
	}
	at.f.Assert(ctx, t)

	log.Infof(ctx, "resizing cluster to %d nodes", at.EndNodes)
	if err := at.f.Resize(at.EndNodes); err != nil {
		t.Fatal(err)
	}
	CheckGossip(ctx, t, at.f, longWaitTime, HasPeers(at.EndNodes))
	at.f.Assert(ctx, t)

	log.Info(ctx, "waiting for rebalance to finish")
	if err := at.WaitForRebalance(ctx, t); err != nil {
		t.Fatal(err)
	}
	at.f.Assert(ctx, t)
}
Author: jmptrader, Project: cockroach, Lines: 59, Source: allocator_test.go


Example 18: Start

// Start starts a node.
func (n *Node) Start() {
	n.Lock()
	defer n.Unlock()

	if n.cmd != nil {
		return
	}

	n.cmd = exec.Command(n.args[0], n.args[1:]...)
	n.cmd.Env = os.Environ()
	n.cmd.Env = append(n.cmd.Env, n.env...)

	stdoutPath := filepath.Join(n.logDir, "stdout")
	stdout, err := os.OpenFile(stdoutPath,
		os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf(context.Background(), "unable to open file %s: %s", stdoutPath, err)
	}
	n.cmd.Stdout = stdout

	stderrPath := filepath.Join(n.logDir, "stderr")
	stderr, err := os.OpenFile(stderrPath,
		os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf(context.Background(), "unable to open file %s: %s", stderrPath, err)
	}
	n.cmd.Stderr = stderr

	err = n.cmd.Start()
	if n.cmd.Process != nil {
		log.Infof(context.Background(), "process %d started: %s",
			n.cmd.Process.Pid, strings.Join(n.args, " "))
	}
	if err != nil {
		log.Infof(context.Background(), "%v", err)
		_ = stdout.Close()
		_ = stderr.Close()
		return
	}

	go func(cmd *exec.Cmd) {
		if err := cmd.Wait(); err != nil {
			log.Errorf(context.Background(), "waiting for command: %v", err)
		}
		_ = stdout.Close()
		_ = stderr.Close()

		ps := cmd.ProcessState
		sy := ps.Sys().(syscall.WaitStatus)

		log.Infof(context.Background(), "Process %d exited with status %d",
			ps.Pid(), sy.ExitStatus())
		log.Infof(context.Background(), ps.String())

		n.Lock()
		n.cmd = nil
		n.Unlock()
	}(n.cmd)
}
Author: knz, Project: cockroach, Lines: 60, Source: localcluster.go


Example 19: Run

// Run is part of the processor interface.
func (d *distinct) Run(wg *sync.WaitGroup) {
	if wg != nil {
		defer wg.Done()
	}

	ctx, span := tracing.ChildSpan(d.ctx, "distinct")
	defer tracing.FinishSpan(span)

	if log.V(2) {
		log.Infof(ctx, "starting distinct process")
		defer log.Infof(ctx, "exiting distinct")
	}

	var scratch []byte
	for {
		row, err := d.input.NextRow()
		if err != nil || row == nil {
			d.output.Close(err)
			return
		}

		// If we are processing DISTINCT(x, y) and the input stream is ordered
		// by x, we define x to be our group key. Our seen set at any given time
		// is only the set of all rows with the same group key. The encoding of
		// the row is the key we use in our 'seen' set.
		encoding, err := d.encode(scratch, row)
		if err != nil {
			d.output.Close(err)
			return
		}

		// The 'seen' set is reset whenever we find consecutive rows differing on the
		// group key thus avoiding the need to store encodings of all rows.
		matched, err := d.matchLastGroupKey(row)
		if err != nil {
			d.output.Close(err)
			return
		}

		if !matched {
			d.lastGroupKey = row
			d.seen = make(map[string]struct{})
		}

		key := string(encoding)
		if _, ok := d.seen[key]; !ok {
			d.seen[key] = struct{}{}
			if !d.output.PushRow(row) {
				if log.V(2) {
					log.Infof(ctx, "no more rows required")
				}
				d.output.Close(nil)
				return
			}
		}
		scratch = encoding[:0]
	}
}
Author: knz, Project: cockroach, Lines: 59, Source: distinct.go


Example 20: Run

// Run is part of the processor interface.
func (ev *evaluator) Run(wg *sync.WaitGroup) {
	if wg != nil {
		defer wg.Done()
	}

	ctx, span := tracing.ChildSpan(ev.ctx, "evaluator")
	defer tracing.FinishSpan(span)

	if log.V(2) {
		log.Infof(ctx, "starting evaluator process")
		defer log.Infof(ctx, "exiting evaluator")
	}

	first := true
	for {
		row, err := ev.input.NextRow()
		if err != nil || row == nil {
			ev.output.Close(err)
			return
		}

		if first {
			first = false

			types := make([]sqlbase.ColumnType_Kind, len(row))
			for i := range types {
				types[i] = row[i].Type
			}
			for i, expr := range ev.specExprs {
				err := ev.exprs[i].init(expr, types, ev.flowCtx.evalCtx)
				if err != nil {
					ev.output.Close(err)
					return
				}
				ev.exprTypes[i] = sqlbase.DatumTypeToColumnKind(ev.exprs[i].expr.ResolvedType())
			}
		}

		outRow, err := ev.eval(row)
		if err != nil {
			ev.output.Close(err)
			return
		}

		if log.V(3) {
			log.Infof(ctx, "pushing %s\n", outRow)
		}
		// Push the row to the output RowReceiver; stop if they don't need more
		// rows.
		if !ev.output.PushRow(outRow) {
			if log.V(2) {
				log.Infof(ctx, "no more rows required")
			}
			ev.output.Close(nil)
			return
		}
	}
}
Author: jmptrader, Project: cockroach, Lines: 59, Source: evaluator.go



Note: The github.com/cockroachdb/cockroach/pkg/util/log.Infof examples in this article were compiled from GitHub and other source-code and documentation hosting platforms. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code, and do not reproduce this article without permission.

