This article collects typical usage examples of the Golang function github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context.OnClosingContext. If you are wondering what OnClosingContext does, how to call it, or where it is used in practice, the curated examples below should help.
Seven code examples of OnClosingContext are shown below, sorted by popularity by default.
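Before diving into the examples, here is a minimal sketch of what the function does (an illustrative snippet written for this article, not one of the collected examples; it uses the un-vendored goprocess import paths): OnClosingContext takes a goprocess.Process and returns a context.Context that is cancelled as soon as the process begins closing, which lets process-managed goroutines pass a shutdown signal to any context-aware API.
package main

import (
	"fmt"

	"github.com/jbenet/goprocess"
	procctx "github.com/jbenet/goprocess/context"
)

func main() {
	proc := goprocess.WithParent(goprocess.Background())
	ctx := procctx.OnClosingContext(proc) // cancelled once proc starts closing

	go proc.Close() // begin closing the process...
	<-ctx.Done()    // ...which cancels the derived context
	fmt.Println("context cancelled:", ctx.Err())
}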
Example 1: Bootstrap
// Bootstrap kicks off IpfsNode bootstrapping. This function will periodically
// check the number of open connections and -- if there are too few -- initiate
// connections to well-known bootstrap peers. It also kicks off subsystem
// bootstrapping (i.e. routing).
func Bootstrap(n *IpfsNode, cfg BootstrapConfig) (io.Closer, error) {

	// make a signal to wait for one bootstrap round to complete.
	doneWithRound := make(chan struct{})

	// the periodic bootstrap function -- the connection supervisor
	periodic := func(worker goprocess.Process) {
		ctx := procctx.OnClosingContext(worker)
		defer log.EventBegin(ctx, "periodicBootstrap", n.Identity).Done()

		if err := bootstrapRound(ctx, n.PeerHost, cfg); err != nil {
			log.Event(ctx, "bootstrapError", n.Identity, lgbl.Error(err))
			log.Debugf("%s bootstrap error: %s", n.Identity, err)
		}
		<-doneWithRound
	}

	// kick off the node's periodic bootstrapping
	proc := periodicproc.Tick(cfg.Period, periodic)
	proc.Go(periodic) // run one right now.

	// kick off Routing.Bootstrap
	if n.Routing != nil {
		ctx := procctx.OnClosingContext(proc)
		if err := n.Routing.Bootstrap(ctx); err != nil {
			proc.Close()
			return nil, err
		}
	}

	doneWithRound <- struct{}{}
	close(doneWithRound) // it no longer blocks periodic
	return proc, nil
}
Author: musha68k, Project: go-ipfs, Lines: 39, Source: bootstrap.go
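A usage note on this example: the returned io.Closer is the periodic process itself, so closing it both stops the ticker and cancels every context derived via OnClosingContext. A hypothetical call site (the variable names and the DefaultBootstrapConfig value are assumptions for illustration, not part of the excerpt):

	closer, err := Bootstrap(node, DefaultBootstrapConfig)
	if err != nil {
		return err
	}
	defer closer.Close() // stops the ticker and cancels in-flight bootstrap rounds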
Example 2: republishEntries
func (rp *Republisher) republishEntries(p goprocess.Process) error {
	ctx, cancel := context.WithCancel(gpctx.OnClosingContext(p))
	defer cancel()

	for id := range rp.entries {
		log.Debugf("republishing ipns entry for %s", id)
		priv := rp.ps.PrivKey(id)

		// Look for it locally only
		_, ipnskey := namesys.IpnsKeysForID(id)
		p, seq, err := rp.getLastVal(ipnskey)
		if err != nil {
			if err == errNoEntry {
				continue
			}
			return err
		}

		// update record with same sequence number
		eol := time.Now().Add(rp.RecordLifetime)
		err = namesys.PutRecordToRouting(ctx, priv, p, seq, eol, rp.r, id)
		if err != nil {
			return err
		}
	}

	return nil
}
Author: musha68k, Project: go-ipfs, Lines: 28, Source: repub.go
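Here gpctx.OnClosingContext(p) ties the whole republish pass to the process lifetime, and the extra context.WithCancel wrapper releases the derived context as soon as the pass returns rather than holding it until the process closes. One hypothetical way to drive this method periodically (the interval name and wiring are assumptions for illustration; the real republisher may differ):

	proc := periodicproc.Tick(repubInterval, func(p goprocess.Process) {
		if err := rp.republishEntries(p); err != nil {
			log.Debugf("republisher failed: %s", err)
		}
	})
	defer proc.Close() // cancels the context of any in-flight pass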
Example 3: provideWorker
func (bs *Bitswap) provideWorker(px process.Process) {

	limit := make(chan struct{}, provideWorkerMax)

	limitedGoProvide := func(k key.Key, wid int) {
		defer func() {
			// replace token when done
			<-limit
		}()
		ev := logging.LoggableMap{"ID": wid}

		ctx := procctx.OnClosingContext(px) // derive ctx from px
		defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, &k).Done()

		ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx
		defer cancel()

		if err := bs.network.Provide(ctx, k); err != nil {
			log.Warning(err)
		}
	}

	// worker spawner, reads from bs.provideKeys until it closes, spawning a
	// _ratelimited_ number of workers to handle each key.
	for wid := 2; ; wid++ {
		ev := logging.LoggableMap{"ID": 1}
		log.Event(procctx.OnClosingContext(px), "Bitswap.ProvideWorker.Loop", ev)

		select {
		case <-px.Closing():
			return
		case k, ok := <-bs.provideKeys:
			if !ok {
				log.Debug("provideKeys channel closed")
				return
			}
			select {
			case <-px.Closing():
				return
			case limit <- struct{}{}:
				go limitedGoProvide(k, wid)
			}
		}
	}
}
Author: noffle, Project: go-ipfs, Lines: 45, Source: workers.go
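The rate limiting in this example is hand-rolled: a buffered channel of empty structs acts as a token bucket, capping the number of concurrent provide workers at provideWorkerMax. The same pattern in isolation (an illustrative sketch; doWork and the numbers are made up):

	limit := make(chan struct{}, 4) // at most 4 workers run concurrently
	for i := 0; i < 16; i++ {
		limit <- struct{}{} // acquire a token; blocks while 4 are busy
		go func(i int) {
			defer func() { <-limit }() // release the token when done
			doWork(i)
		}(i)
	}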
Example 4: newQueryRunner
func newQueryRunner(q *dhtQuery) *dhtQueryRunner {
	proc := process.WithParent(process.Background())
	ctx := ctxproc.OnClosingContext(proc)
	return &dhtQueryRunner{
		query:          q,
		peersToQuery:   queue.NewChanQueue(ctx, queue.NewXORDistancePQ(q.key)),
		peersRemaining: todoctr.NewSyncCounter(),
		peersSeen:      pset.New(),
		rateLimit:      make(chan struct{}, q.concurrency),
		proc:           proc,
	}
}
Author: JeffreyRodriguez, Project: go-ipfs, Lines: 12, Source: query.go
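Because the ChanQueue's context derives from the runner's own process, shutdown propagates cleanly: closing proc cancels ctx, which in turn stops the queue's internal goroutine. A hypothetical shutdown site (assumed for illustration; the excerpt does not show how the runner is actually driven):

	runner := newQueryRunner(q)
	// ... run the query ...
	runner.proc.Close() // cancels ctx and winds down peersToQuery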
Example 5: provideWorker
func (bs *Bitswap) provideWorker(px process.Process) {

	limiter := ratelimit.NewRateLimiter(px, provideWorkerMax)

	limitedGoProvide := func(k key.Key, wid int) {
		ev := logging.LoggableMap{"ID": wid}
		limiter.LimitedGo(func(px process.Process) {
			ctx := procctx.OnClosingContext(px) // derive ctx from px
			defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, &k).Done()

			ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx
			defer cancel()

			if err := bs.network.Provide(ctx, k); err != nil {
				log.Error(err)
			}
		})
	}

	// worker spawner, reads from bs.provideKeys until it closes, spawning a
	// _ratelimited_ number of workers to handle each key.
	limiter.Go(func(px process.Process) {
		for wid := 2; ; wid++ {
			ev := logging.LoggableMap{"ID": 1}
			log.Event(procctx.OnClosingContext(px), "Bitswap.ProvideWorker.Loop", ev)

			select {
			case <-px.Closing():
				return
			case k, ok := <-bs.provideKeys:
				if !ok {
					log.Debug("provideKeys channel closed")
					return
				}
				limitedGoProvide(k, wid)
			}
		}
	})
}
Author: JeffreyRodriguez, Project: go-ipfs, Lines: 40, Source: workers.go
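This is another revision of the provideWorker shown in Example 3: the hand-rolled token channel is replaced by goprocess's ratelimit helper, which tracks workers as child processes so they are waited on at shutdown. A minimal sketch of the helper on its own (illustrative; the limit of 4 and doWork are assumptions):

	limiter := ratelimit.NewRateLimiter(process.Background(), 4)
	for i := 0; i < 16; i++ {
		i := i
		limiter.LimitedGo(func(p process.Process) {
			doWork(procctx.OnClosingContext(p), i) // at most 4 run at once
		})
	}
	limiter.Close() // blocks until all spawned children finish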
Example 6: start
func (w *Worker) start(c Config) {

	workerChan := make(chan *blocks.Block, c.WorkerBufferSize)

	// clientWorker handles incoming blocks from |w.added| and sends to
	// |workerChan|. This will never block the client.
	w.process.Go(func(proc process.Process) {
		defer close(workerChan)

		var workQueue BlockList
		debugInfo := time.NewTicker(5 * time.Second)
		defer debugInfo.Stop()

		for {
			// take advantage of the fact that sending on nil channel always
			// blocks so that a message is only sent if a block exists
			sendToWorker := workerChan
			nextBlock := workQueue.Pop()
			if nextBlock == nil {
				sendToWorker = nil
			}

			select {
			// if worker is ready and there's a block to process, send the
			// block
			case sendToWorker <- nextBlock:
			case <-debugInfo.C:
				if workQueue.Len() > 0 {
					log.Debugf("%d blocks in blockservice provide queue...", workQueue.Len())
				}
			case block := <-w.added:
				if nextBlock != nil {
					workQueue.Push(nextBlock) // missed the chance to send it
				}
				// if the client sends another block, add it to the queue.
				workQueue.Push(block)
			case <-proc.Closing():
				return
			}
		}
	})

	// reads from |workerChan| until w.process closes
	limiter := ratelimit.NewRateLimiter(w.process, c.NumWorkers)
	limiter.Go(func(proc process.Process) {
		ctx := procctx.OnClosingContext(proc) // shut down in-progress HasBlock when time to die
		for {
			select {
			case <-proc.Closing():
				return
			case block, ok := <-workerChan:
				if !ok {
					return
				}
				limiter.LimitedGo(func(proc process.Process) {
					if err := w.exchange.HasBlock(ctx, block); err != nil {
						log.Infof("blockservice worker error: %s", err)
					}
				})
			}
		}
	})
}
Author: noscripter, Project: go-ipfs, Lines: 64, Source: worker.go
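The select in the first goroutine relies on a classic trick: sending on a nil channel blocks forever, so the send case is effectively disabled whenever the queue is empty. The trick in isolation (an illustrative sketch; the channel and queue names are made up):

	var send chan int // nil disables the send case below
	if len(pending) > 0 {
		send = out // enable sending only when there is an item
	}
	select {
	case send <- pending[0]: // can only fire when send != nil
		pending = pending[1:]
	case v := <-in:
		pending = append(pending, v)
	}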
Example 7: queryPeer
func (r *dhtQueryRunner) queryPeer(proc process.Process, p peer.ID) {
	// make sure we rate limit concurrency.
	select {
	case <-r.rateLimit:
	case <-proc.Closing():
		r.peersRemaining.Decrement(1)
		return
	}

	// ok let's do this!

	// create a context from our proc.
	ctx := ctxproc.OnClosingContext(proc)

	// make sure we do this when we exit
	defer func() {
		// signal we're done processing peer p
		r.peersRemaining.Decrement(1)
		r.rateLimit <- struct{}{}
	}()

	// make sure we're connected to the peer.
	// FIXME abstract away into the network layer
	if conns := r.query.dht.host.Network().ConnsToPeer(p); len(conns) == 0 {
		log.Infof("not connected. dialing.")
		// while we dial, we do not take up a rate limit. this is to allow
		// forward progress during potentially very high latency dials.
		r.rateLimit <- struct{}{}

		pi := peer.PeerInfo{ID: p}
		if err := r.query.dht.host.Connect(ctx, pi); err != nil {
			log.Debugf("Error connecting: %s", err)

			notif.PublishQueryEvent(ctx, &notif.QueryEvent{
				Type:  notif.QueryError,
				Extra: err.Error(),
			})

			r.Lock()
			r.errs = append(r.errs, err)
			r.Unlock()
			<-r.rateLimit // need to grab it again, as we deferred.
			return
		}
		<-r.rateLimit // need to grab it again, as we deferred.
		log.Debugf("connected. dial success.")
	}

	// finally, run the query against this peer
	res, err := r.query.qfunc(ctx, p)

	if err != nil {
		log.Debugf("ERROR worker for: %v %v", p, err)
		r.Lock()
		r.errs = append(r.errs, err)
		r.Unlock()

	} else if res.success {
		log.Debugf("SUCCESS worker for: %v %s", p, res)
		r.Lock()
		r.result = res
		r.Unlock()
		go r.proc.Close() // signal to everyone that we're done.
		// must be async, as we're one of the children, and Close blocks.

	} else if len(res.closerPeers) > 0 {
		log.Debugf("PEERS CLOSER -- worker for: %v (%d closer peers)", p, len(res.closerPeers))
		for _, next := range res.closerPeers {
			if next.ID == r.query.dht.self { // don't add self.
				log.Debugf("PEERS CLOSER -- worker for: %v found self", p)
				continue
			}

			// add their addresses to the dialer's peerstore
			r.query.dht.peerstore.AddAddrs(next.ID, next.Addrs, peer.TempAddrTTL)
			r.addPeerToQuery(next.ID)
			log.Debugf("PEERS CLOSER -- worker for: %v added %v (%v)", p, next.ID, next.Addrs)
		}
	} else {
		log.Debugf("QUERY worker for: %v - not found, and no closer peers.", p)
	}
}
Author: JeffreyRodriguez, Project: go-ipfs, Lines: 83, Source: query.go
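One detail worth noting in this example: before the potentially slow dial, the worker gives its rate-limit token back so that high-latency connections do not starve other workers, then re-acquires it before running the query. Reduced to its shape (an illustrative sketch; slowDial is a made-up stand-in for host.Connect):

	r.rateLimit <- struct{}{} // release the slot while dialing
	if err := slowDial(ctx, p); err != nil {
		<-r.rateLimit // re-acquire before returning (the defer releases it)
		return
	}
	<-r.rateLimit // re-acquire before the rate-limited query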
Note: the github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context.OnClosingContext examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright in the source code remains with the original authors. For redistribution and use, please consult each project's license. Do not reproduce this compilation without permission.