本文整理汇总了Golang中github.com/cockroachdb/cockroach/storage.NewStorePool函数的典型用法代码示例。如果您正苦于以下问题:Golang NewStorePool函数的具体用法?Golang NewStorePool怎么用?Golang NewStorePool使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewStorePool函数的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Golang代码示例。
示例1: createCluster
// createCluster generates a new cluster using the provided stopper and the
// number of nodes supplied. Each node will have one store to start.
func createCluster(stopper *stop.Stopper, nodeCount int) *Cluster {
	source, seed := randutil.NewPseudoRand()
	clk := hlc.NewClock(hlc.UnixNano)
	rpcCtx := rpc.NewContext(&base.Context{}, clk, stopper)
	gossipNet := gossip.New(rpcCtx, gossip.TestInterval, gossip.TestBootstrap)
	pool := storage.NewStorePool(gossipNet, storage.TestTimeUntilStoreDeadOff, stopper)

	cluster := &Cluster{
		stopper:       stopper,
		clock:         clk,
		rpc:           rpcCtx,
		gossip:        gossipNet,
		storePool:     pool,
		allocator:     storage.MakeAllocator(pool, storage.RebalancingOptions{}),
		storeGossiper: gossiputil.NewStoreGossiper(gossipNet),
		nodes:         make(map[proto.NodeID]*Node),
		stores:        make(map[proto.StoreID]*Store),
		ranges:        make(map[proto.RangeID]*Range),
		rand:          source,
		seed:          seed,
	}

	// Populate the cluster with the requested number of single-store nodes.
	for n := 0; n < nodeCount; n++ {
		cluster.addNewNodeWithStore()
	}

	// Seed the cluster with a single range, attached to the first store.
	initialRange := cluster.addRange()
	initialRange.attachRangeToStore(cluster.stores[proto.StoreID(0)])
	return cluster
}
开发者ID:harryyeh,项目名称:cockroach,代码行数:33,代码来源:cluster.go
示例2: Start
// Start initializes the multiTestContext with numStores stores, lazily
// constructing every dependency (clock, gossip, stoppers, transport, store
// pool, DB) that the test has not already injected on m.
func (m *multiTestContext) Start(t *testing.T, numStores int) {
	m.t = t
	if m.manualClock == nil {
		m.manualClock = hlc.NewManualClock(0)
	}
	if m.clock == nil {
		m.clock = hlc.NewClock(m.manualClock.UnixNano)
	}
	if m.gossip == nil {
		rpcContext := rpc.NewContext(&base.Context{}, m.clock, nil)
		m.gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	}
	if m.clientStopper == nil {
		m.clientStopper = stop.NewStopper()
	}
	if m.transport == nil {
		m.transport = multiraft.NewLocalRPCTransport(m.clientStopper)
	}
	if m.storePool == nil {
		// Default to the "off" timeout so stores are never treated as dead
		// unless the test explicitly sets timeUntilStoreDead.
		if m.timeUntilStoreDead == 0 {
			m.timeUntilStoreDead = storage.TestTimeUntilStoreDeadOff
		}
		m.storePool = storage.NewStorePool(m.gossip, m.timeUntilStoreDead, m.clientStopper)
	}
	// Always create the first sender.
	m.senders = append(m.senders, kv.NewLocalSender())
	// rpcSend short-circuits the RPC layer: every request is executed
	// synchronously against the first local sender instead of over the wire.
	rpcSend := func(_ rpc.Options, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message,
		_ *rpc.Context) ([]gogoproto.Message, error) {
		call := proto.Call{
			Args:  getArgs(nil /* net.Addr */).(proto.Request),
			Reply: getReply().(proto.Response),
		}
		m.senders[0].Send(context.Background(), call)
		return []gogoproto.Message{call.Reply}, call.Reply.Header().GoError()
	}
	if m.db == nil {
		// Build the KV client stack: DistSender (using the stubbed rpcSend and
		// the first sender as the range-descriptor source) wrapped by a
		// TxnCoordSender.
		distSender := kv.NewDistSender(&kv.DistSenderContext{
			Clock:             m.clock,
			RangeDescriptorDB: m.senders[0],
			RPCSend:           rpcSend,
		}, m.gossip)
		sender := kv.NewTxnCoordSender(distSender, m.clock, false, nil, m.clientStopper)
		m.db = client.NewDB(sender)
	}
	for i := 0; i < numStores; i++ {
		m.addStore()
	}
	if m.transportStopper == nil {
		m.transportStopper = stop.NewStopper()
	}
	// Ensure the transport is shut down with the transport stopper.
	m.transportStopper.AddCloser(m.transport)
}
开发者ID:yosiat,项目名称:cockroach,代码行数:57,代码来源:client_test.go
示例3: createCluster
// createCluster generates a new cluster using the provided stopper and the
// number of nodes supplied. Each node will have one store to start.
func createCluster(
	stopper *stop.Stopper,
	nodeCount int,
	epochWriter, actionWriter io.Writer,
	script Script,
	rand *rand.Rand,
) *Cluster {
	clock := hlc.NewClock(hlc.UnixNano)
	// A nil base context is passed: this simulated cluster does no real
	// networking.
	rpcContext := rpc.NewContext(nil, clock, stopper)
	g := gossip.New(rpcContext, nil, stopper)
	// NodeID is required for Gossip, so set it to -1 for the cluster Gossip
	// instance to prevent conflicts with real NodeIDs.
	g.SetNodeID(-1)
	storePool := storage.NewStorePool(g, clock, storage.TestTimeUntilStoreDeadOff, stopper)
	c := &Cluster{
		stopper:   stopper,
		clock:     clock,
		rpc:       rpcContext,
		gossip:    g,
		storePool: storePool,
		// Deterministic allocation keeps simulation runs reproducible.
		allocator: storage.MakeAllocator(storePool, storage.AllocatorOptions{
			AllowRebalance: true,
			Deterministic:  true,
		}),
		storeGossiper:   gossiputil.NewStoreGossiper(g),
		nodes:           make(map[roachpb.NodeID]*Node),
		stores:          make(map[roachpb.StoreID]*Store),
		ranges:          make(map[roachpb.RangeID]*Range),
		rangeIDsByStore: make(map[roachpb.StoreID]roachpb.RangeIDSlice),
		rand:            rand,
		// Column-align epoch/action output (minwidth 8, tabwidth 1, padding 2).
		epochWriter:  tabwriter.NewWriter(epochWriter, 8, 1, 2, ' ', 0),
		actionWriter: tabwriter.NewWriter(actionWriter, 8, 1, 2, ' ', 0),
		script:       script,
		// Start at -1 so the first epoch emitted below is epoch 0.
		epoch: -1,
	}
	// Add the nodes.
	for i := 0; i < nodeCount; i++ {
		c.addNewNodeWithStore()
	}
	// Add a single range and add to this first node's first store.
	firstRange := c.addRange()
	firstRange.addReplica(c.stores[0])
	c.calculateRangeIDsByStore()
	// Output the first epoch header.
	c.epoch = 0
	c.OutputEpochHeader()
	c.OutputEpoch()
	c.flush()
	return c
}
开发者ID:GitGoldie,项目名称:cockroach,代码行数:57,代码来源:cluster.go
示例4: createCluster
// createCluster generates a new cluster using the provided stopper and the
// number of nodes supplied. Each node will have one store to start.
func createCluster(stopper *stop.Stopper, nodeCount int, epochWriter, actionWriter io.Writer, script Script) *Cluster {
	source, seed := randutil.NewPseudoRand()
	clk := hlc.NewClock(hlc.UnixNano)
	rpcCtx := rpc.NewContext(&base.Context{}, clk, stopper)
	gossipNet := gossip.New(rpcCtx, gossip.TestInterval, gossip.TestBootstrap)
	pool := storage.NewStorePool(gossipNet, storage.TestTimeUntilStoreDeadOff, stopper)

	cluster := &Cluster{
		stopper:   stopper,
		clock:     clk,
		rpc:       rpcCtx,
		gossip:    gossipNet,
		storePool: pool,
		// Deterministic rebalancing keeps simulation runs reproducible.
		allocator: storage.MakeAllocator(pool, storage.RebalancingOptions{
			AllowRebalance: true,
			Deterministic:  true,
		}),
		storeGossiper:   gossiputil.NewStoreGossiper(gossipNet),
		nodes:           make(map[roachpb.NodeID]*Node),
		stores:          make(map[roachpb.StoreID]*Store),
		ranges:          make(map[roachpb.RangeID]*Range),
		rangeIDsByStore: make(map[roachpb.StoreID]roachpb.RangeIDSlice),
		rand:            source,
		seed:            seed,
		epochWriter:     tabwriter.NewWriter(epochWriter, 8, 1, 2, ' ', 0),
		actionWriter:    tabwriter.NewWriter(actionWriter, 8, 1, 2, ' ', 0),
		script:          script,
		epoch:           -1,
	}

	// Populate the cluster with the requested number of single-store nodes.
	for n := 0; n < nodeCount; n++ {
		cluster.addNewNodeWithStore()
	}

	// Seed the cluster with a single range replicated on the first store.
	initialRange := cluster.addRange()
	initialRange.addReplica(cluster.stores[0])
	cluster.calculateRangeIDsByStore()

	// Emit the header and state for epoch zero.
	cluster.epoch = 0
	cluster.OutputEpochHeader()
	cluster.OutputEpoch()
	cluster.flush()
	return cluster
}
开发者ID:nporsche,项目名称:cockroach,代码行数:50,代码来源:cluster.go
示例5: Start
// Start initializes the multiTestContext with numStores stores, lazily
// constructing every dependency (clock, gossip, stoppers, transport, store
// pool, DB) that the test has not already injected on m.
func (m *multiTestContext) Start(t *testing.T, numStores int) {
	m.t = t
	if m.manualClock == nil {
		m.manualClock = hlc.NewManualClock(0)
	}
	if m.clock == nil {
		m.clock = hlc.NewClock(m.manualClock.UnixNano)
	}
	if m.gossip == nil {
		rpcContext := rpc.NewContext(&base.Context{}, m.clock, nil)
		m.gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	}
	if m.clientStopper == nil {
		m.clientStopper = stop.NewStopper()
	}
	if m.transport == nil {
		m.transport = multiraft.NewLocalRPCTransport(m.clientStopper)
	}
	if m.storePool == nil {
		// The "off" timeout means stores are never treated as dead in tests.
		m.storePool = storage.NewStorePool(m.gossip, storage.TestTimeUntilStoreDeadOff, m.clientStopper)
	}
	// Always create the first sender.
	m.senders = append(m.senders, kv.NewLocalSender())
	if m.db == nil {
		// Wrap the first sender in a TxnCoordSender and open a client DB on it.
		sender := kv.NewTxnCoordSender(m.senders[0], m.clock, false, nil, m.clientStopper)
		var err error
		if m.db, err = client.Open("//", client.SenderOpt(sender)); err != nil {
			t.Fatal(err)
		}
	}
	for i := 0; i < numStores; i++ {
		m.addStore()
	}
	if m.transportStopper == nil {
		m.transportStopper = stop.NewStopper()
	}
	// Ensure the transport is shut down with the transport stopper.
	m.transportStopper.AddCloser(m.transport)
}
开发者ID:snehasis419,项目名称:cockroach,代码行数:41,代码来源:client_test.go
示例6: NewServer
// NewServer creates a Server from a server.Context. It validates the
// context, then wires up the server's subsystems in dependency order:
// clock, RPC, gossip, store pool, KV client stack, Raft transport, SQL
// servers, and finally the node and auxiliary (admin/status/timeseries)
// servers. Returns an error if validation or any subsystem setup fails.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
	if ctx == nil {
		return nil, util.Errorf("ctx must not be null")
	}
	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}
	if ctx.Insecure {
		log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}
	s := &Server{
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)
	// RPC context plus a background worker that monitors clock offsets
	// against remote nodes.
	s.rpcContext = crpc.NewContext(&ctx.Context, s.clock, stopper)
	stopper.RunWorker(func() {
		s.rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
	})
	s.rpc = crpc.NewServer(s.rpcContext)
	s.gossip = gossip.New(s.rpcContext, s.ctx.GossipBootstrapResolvers)
	s.storePool = storage.NewStorePool(s.gossip, s.clock, ctx.TimeUntilStoreDead, stopper)
	feed := util.NewFeed(stopper)
	tracer := tracer.NewTracer(feed, ctx.Addr)
	// KV client stack: DistSender routes requests to ranges; TxnCoordSender
	// layers transaction coordination on top.
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.clock, RPCContext: s.rpcContext}, s.gossip)
	sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, tracer, s.stopper)
	s.db = client.NewDB(sender)
	var err error
	s.raftTransport, err = newRPCTransport(s.gossip, s.rpc, s.rpcContext)
	if err != nil {
		return nil, err
	}
	s.stopper.AddCloser(s.raftTransport)
	s.kvDB = kv.NewDBServer(&s.ctx.Context, sender)
	if err := s.kvDB.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}
	// SQL layer: lease manager plus the SQL RPC and Postgres-wire servers.
	leaseMgr := sql.NewLeaseManager(0, *s.db, s.clock)
	leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	s.sqlServer = sql.MakeServer(&s.ctx.Context, *s.db, s.gossip, leaseMgr)
	if err := s.sqlServer.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}
	s.pgServer = pgwire.NewServer(&pgwire.Context{
		Context:  &s.ctx.Context,
		Executor: s.sqlServer.Executor,
		Stopper:  stopper,
	})
	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:           s.clock,
		DB:              s.db,
		Gossip:          s.gossip,
		Transport:       s.raftTransport,
		ScanInterval:    s.ctx.ScanInterval,
		ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
		EventFeed:       feed,
		Tracer:          tracer,
		StorePool:       s.storePool,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
			Mode:           s.ctx.BalanceMode,
		},
	}
	s.node = NewNode(nCtx)
	// Auxiliary servers: admin, status, and timeseries.
	s.admin = newAdminServer(s.db, s.stopper)
	s.status = newStatusServer(s.db, s.gossip, ctx)
	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.NewServer(s.tsDB)
	return s, nil
}
开发者ID:gechong,项目名称:cockroach,代码行数:95,代码来源:server.go
示例7: NewServer
// NewServer creates a Server from a server.Context. It validates the
// context, then wires up the server's subsystems in dependency order:
// clock, RPC, gossip, store pool, KV client stack, Raft transport, and
// the SQL layer. NOTE: the listing below is truncated in the source;
// the remainder of the function is not shown here.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
	if ctx == nil {
		return nil, util.Errorf("ctx must not be null")
	}
	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}
	if ctx.Insecure {
		log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}
	s := &Server{
		Tracer:  tracing.NewTracer(),
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)
	// RPC context plus a background worker that monitors clock offsets
	// against remote nodes.
	s.rpcContext = crpc.NewContext(&ctx.Context, s.clock, stopper)
	stopper.RunWorker(func() {
		s.rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
	})
	s.rpc = crpc.NewServer(s.rpcContext)
	s.gossip = gossip.New(s.rpcContext, s.ctx.GossipBootstrapResolvers, stopper)
	s.storePool = storage.NewStorePool(s.gossip, s.clock, ctx.TimeUntilStoreDead, stopper)
	feed := util.NewFeed(stopper)
	// A custom RetryOptions is created which uses stopper.ShouldDrain() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error (HTTP
	// Code 5xx). This is the correct error for a server to return when it is
	// shutting down, and is normally retryable in a cluster environment.
	// However, on a single-node setup (such as a test), retries will never
	// succeed because the only server has been shut down; thus the
	// DistSender needs to know that it should not retry in this situation.
	retryOpts := kv.GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	ds := kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}, s.gossip)
	// KV client stack with per-transaction metrics registered on txnRegistry.
	txnRegistry := metric.NewRegistry()
	txnMetrics := kv.NewTxnMetrics(txnRegistry)
	sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, s.Tracer, s.stopper, txnMetrics)
	s.db = client.NewDB(sender)
	s.grpc = grpc.NewServer()
	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)
	s.kvDB = kv.NewDBServer(&s.ctx.Context, sender, stopper)
	if err := s.kvDB.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}
	// SQL layer: lease manager, executor, and Postgres-wire server.
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	sqlRegistry := metric.NewRegistry()
	s.sqlExecutor = sql.NewExecutor(*s.db, s.gossip, s.leaseMgr, s.stopper, sqlRegistry)
	s.pgServer = pgwire.MakeServer(&s.ctx.Context, s.sqlExecutor, sqlRegistry)
	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:           s.clock,
		DB:              s.db,
		Gossip:          s.gossip,
		Transport:       s.raftTransport,
		ScanInterval:    s.ctx.ScanInterval,
		ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
		EventFeed:       feed,
		Tracer:          s.Tracer,
		StorePool:       s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
			Mode:           s.ctx.BalanceMode,
		},
	}
	//......... remainder of this function omitted in the source listing .........
开发者ID:mrtracy,项目名称:cockroach,代码行数:101,代码来源:server.go
示例8: NewServer
// NewServer creates a Server from a server.Context. It validates the
// context (address, base context, tracer, TLS), then wires up the
// server's subsystems in dependency order: clock, RPC, gossip, store
// pool, and the KV client stack. NOTE: the listing below is truncated in
// the source; the remainder of the function is not shown here.
func NewServer(srvCtx Context, stopper *stop.Stopper) (*Server, error) {
	if _, err := net.ResolveTCPAddr("tcp", srvCtx.Addr); err != nil {
		return nil, errors.Errorf("unable to resolve RPC address %q: %v", srvCtx.Addr, err)
	}
	if srvCtx.Ctx == nil {
		srvCtx.Ctx = context.Background()
	}
	// The server's base context must not carry cancellation or a deadline.
	if srvCtx.Ctx.Done() != nil {
		panic("context with cancel or deadline")
	}
	if tracing.TracerFromCtx(srvCtx.Ctx) == nil {
		// TODO(radu): instead of modifying srvCtx.Ctx, we should have a separate
		// context.Context inside Server. We will need to rename server.Context
		// though.
		srvCtx.Ctx = tracing.WithTracer(srvCtx.Ctx, tracing.NewTracer())
	}
	if srvCtx.Insecure {
		log.Warning(srvCtx.Ctx, "running in insecure mode, this is strongly discouraged. See --insecure.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := srvCtx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := srvCtx.GetClientTLSConfig(); err != nil {
		return nil, err
	}
	s := &Server{
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	// Add a dynamic log tag value for the node ID.
	//
	// We need to pass the server's Ctx as a base context for the various server
	// components, but we won't know the node ID until we Start(). At that point
	// it's too late to change the contexts in the components (various background
	// processes will have already started using the contexts).
	//
	// The dynamic value allows us to add the log tag to the context now and
	// update the value asynchronously. It's not significantly more expensive than
	// a regular tag since it's just doing an (atomic) load when a log/trace
	// message is constructed.
	s.nodeLogTagVal.Set(log.DynamicIntValueUnknown)
	srvCtx.Ctx = log.WithLogTag(srvCtx.Ctx, "n", &s.nodeLogTagVal)
	s.ctx = srvCtx
	s.clock.SetMaxOffset(srvCtx.MaxOffset)
	s.rpcContext = rpc.NewContext(srvCtx.Context, s.clock, s.stopper)
	// Kill the server if clock offset relative to other nodes exceeds the
	// configured bound; checked on every heartbeat.
	s.rpcContext.HeartbeatCB = func() {
		if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
			log.Fatal(s.Ctx(), err)
		}
	}
	s.grpc = rpc.NewServer(s.rpcContext)
	s.registry = metric.NewRegistry()
	s.gossip = gossip.New(
		s.Ctx(), s.rpcContext, s.grpc, s.ctx.GossipBootstrapResolvers, s.stopper, s.registry)
	s.storePool = storage.NewStorePool(
		s.gossip,
		s.clock,
		s.rpcContext,
		srvCtx.ReservationsEnabled,
		srvCtx.TimeUntilStoreDead,
		s.stopper,
	)
	// A custom RetryOptions is created which uses stopper.ShouldQuiesce() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error (HTTP
	// Code 5xx). This is the correct error for a server to return when it is
	// shutting down, and is normally retryable in a cluster environment.
	// However, on a single-node setup (such as a test), retries will never
	// succeed because the only server has been shut down; thus the
	// DistSender needs to know that it should not retry in this situation.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = s.stopper.ShouldQuiesce()
	distSenderCfg := kv.DistSenderConfig{
		Ctx:             s.Ctx(),
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}
	s.distSender = kv.NewDistSender(&distSenderCfg, s.gossip)
	txnMetrics := kv.MakeTxnMetrics()
	s.registry.AddMetricStruct(txnMetrics)
	s.txnCoordSender = kv.NewTxnCoordSender(s.Ctx(), s.distSender, s.clock, srvCtx.Linearizable,
		s.stopper, txnMetrics)
	s.db = client.NewDB(s.txnCoordSender)
	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)
	//......... remainder of this function omitted in the source listing .........
开发者ID:yaojingguo,项目名称:cockroach,代码行数:101,代码来源:server.go
示例9: NewServer
// NewServer creates a Server from a server.Context. It validates the
// context, then wires up the server's subsystems in dependency order:
// clock, RPC, gossip, store pool, KV client stack, Raft transport, SQL
// server, and finally the node and auxiliary (admin/status/timeseries)
// servers. Returns an error if validation or any subsystem setup fails.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
	if ctx == nil {
		return nil, util.Errorf("ctx must not be null")
	}
	addr := ctx.Addr
	_, err := net.ResolveTCPAddr("tcp", addr)
	if err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", addr, err)
	}
	if ctx.Insecure {
		log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}
	s := &Server{
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)
	// RPC context plus a background worker that monitors clock offsets
	// against remote nodes.
	rpcContext := rpc.NewContext(&ctx.Context, s.clock, stopper)
	stopper.RunWorker(func() {
		rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
	})
	s.rpc = rpc.NewServer(util.MakeUnresolvedAddr("tcp", addr), rpcContext)
	s.stopper.AddCloser(s.rpc)
	s.gossip = gossip.New(rpcContext, s.ctx.GossipInterval, s.ctx.GossipBootstrapResolvers)
	s.storePool = storage.NewStorePool(s.gossip, ctx.TimeUntilStoreDead, stopper)
	feed := util.NewFeed(stopper)
	tracer := tracer.NewTracer(feed, addr)
	// KV client stack: DistSender routes requests to ranges; TxnCoordSender
	// layers transaction coordination on top.
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.clock}, s.gossip)
	sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, tracer, s.stopper)
	if s.db, err = client.Open("//", client.SenderOpt(sender)); err != nil {
		return nil, err
	}
	s.raftTransport, err = newRPCTransport(s.gossip, s.rpc, rpcContext)
	if err != nil {
		return nil, err
	}
	s.stopper.AddCloser(s.raftTransport)
	s.kvDB = kv.NewDBServer(&s.ctx.Context, sender)
	// The KV RPC endpoint is only registered when explicitly enabled.
	if s.ctx.ExperimentalRPCServer {
		if err = s.kvDB.RegisterRPC(s.rpc); err != nil {
			return nil, err
		}
	}
	s.sqlServer = sql.MakeHTTPServer(&s.ctx.Context, *s.db)
	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:           s.clock,
		DB:              s.db,
		Gossip:          s.gossip,
		Transport:       s.raftTransport,
		ScanInterval:    s.ctx.ScanInterval,
		ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
		EventFeed:       feed,
		Tracer:          tracer,
		StorePool:       s.storePool,
	}
	s.node = NewNode(nCtx)
	// Auxiliary servers: admin, status, and timeseries.
	s.admin = newAdminServer(s.db, s.stopper)
	s.status = newStatusServer(s.db, s.gossip, ctx)
	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.NewServer(s.tsDB)
	return s, nil
}
开发者ID:lennypruss,项目名称:cockroach,代码行数:85,代码来源:server.go
示例10: NewServer
// NewServer creates a Server from a server.Context. It validates the
// context, then wires up the server's subsystems in dependency order:
// clock, RPC, gossip, store pool, KV client stack, Raft transport, and
// the SQL/DistSQL layer. NOTE: the listing below is truncated in the
// source; the remainder of the function is not shown here.
func NewServer(ctx Context, stopper *stop.Stopper) (*Server, error) {
	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, errors.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}
	if ctx.Insecure {
		log.Warning(context.TODO(), "running in insecure mode, this is strongly discouraged. See --insecure.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}
	s := &Server{
		Tracer:  tracing.NewTracer(),
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)
	s.rpcContext = rpc.NewContext(ctx.Context, s.clock, s.stopper)
	// Kill the server if clock offset relative to other nodes exceeds the
	// configured bound; checked on every heartbeat.
	s.rpcContext.HeartbeatCB = func() {
		if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
			log.Fatal(context.TODO(), err)
		}
	}
	s.grpc = rpc.NewServer(s.rpcContext)
	s.registry = metric.NewRegistry()
	s.gossip = gossip.New(s.rpcContext, s.grpc, s.ctx.GossipBootstrapResolvers, s.stopper, s.registry)
	s.storePool = storage.NewStorePool(
		s.gossip,
		s.clock,
		s.rpcContext,
		ctx.ReservationsEnabled,
		ctx.TimeUntilStoreDead,
		s.stopper,
	)
	// A custom RetryOptions is created which uses stopper.ShouldQuiesce() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error (HTTP
	// Code 5xx). This is the correct error for a server to return when it is
	// shutting down, and is normally retryable in a cluster environment.
	// However, on a single-node setup (such as a test), retries will never
	// succeed because the only server has been shut down; thus the
	// DistSender needs to know that it should not retry in this situation.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = s.stopper.ShouldQuiesce()
	s.distSender = kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}, s.gossip)
	// KV client stack with transaction metrics registered on the server registry.
	txnMetrics := kv.NewTxnMetrics(s.registry)
	sender := kv.NewTxnCoordSender(s.distSender, s.clock, ctx.Linearizable, s.Tracer,
		s.stopper, txnMetrics)
	s.db = client.NewDB(sender)
	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)
	s.kvDB = kv.NewDBServer(s.ctx.Context, sender, s.stopper)
	roachpb.RegisterExternalServer(s.grpc, s.kvDB)
	// Set up Lease Manager
	var lmKnobs sql.LeaseManagerTestingKnobs
	if ctx.TestingKnobs.SQLLeaseManager != nil {
		lmKnobs = *ctx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs, s.stopper)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	// Set up the DistSQL server
	distSQLCtx := distsql.ServerContext{
		Context:    context.Background(),
		DB:         s.db,
		RPCContext: s.rpcContext,
	}
	s.distSQLServer = distsql.NewServer(distSQLCtx)
	distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)
	// Set up Executor
	eCtx := sql.ExecutorContext{
		Context:      context.Background(),
		DB:           s.db,
		Gossip:       s.gossip,
		LeaseManager: s.leaseMgr,
		Clock:        s.clock,
		DistSQLSrv:   s.distSQLServer,
	}
	if ctx.TestingKnobs.SQLExecutor != nil {
		//......... remainder of this function omitted in the source listing .........
开发者ID:yangxuanjia,项目名称:cockroach,代码行数:101,代码来源:server.go
注:本文中的github.com/cockroachdb/cockroach/storage.NewStorePool函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论