This article collects and summarizes typical usage examples of the Golang function github.com/GoogleCloudPlatform/kubernetes/pkg/client/record.NewBroadcaster. If you have been wondering what exactly NewBroadcaster does, how to call it, and how it is used in practice, the curated examples below should help.
The following presents 18 code examples of NewBroadcaster, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Golang code examples.
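Before turning to the individual examples, it helps to see the wiring pattern that nearly all of them share: create one broadcaster per process, attach one or more sinks (structured logging and/or the API server), and hand out component-scoped recorders to the code that emits events. The helper below is a minimal, hedged sketch of that pattern: the function name newComponentRecorder is our own invention, but every call it makes on the broadcaster appears verbatim in the examples that follow.

package events

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"

	"github.com/golang/glog"
)

// newComponentRecorder condenses the pattern used throughout this article:
// one broadcaster per process, one or more sinks, and a recorder stamped
// with the emitting component's identity. (Hypothetical helper.)
func newComponentRecorder(kubeClient client.Interface, component string) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()

	// Sink 1: mirror every event into the component's log.
	eventBroadcaster.StartLogging(glog.Infof)

	// Sink 2: persist events through the API server when a client exists;
	// several examples below guard this call the same way.
	if kubeClient != nil {
		eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	}

	// The recorder stamps every event it records with this EventSource.
	return eventBroadcaster.NewRecorder(api.EventSource{Component: component})
}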
Example 1: createPodConfigTester
func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubelet.PodUpdate, *PodConfig) {
	eventBroadcaster := record.NewBroadcaster()
	config := NewPodConfig(mode, eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet"}))
	channel := config.Channel(TestSource)
	ch := config.Updates()
	return channel, ch, config
}
Author: chenzhen411, Project: kubernetes, Lines of code: 7, Source file: config_test.go
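Note that example 1 never attaches a sink, so events recorded during the test are broadcast to zero watchers and silently dropped, which is exactly what a pod-config test wants. When a test does need to assert on emitted events, examples 15 and 16 below show the full pattern; here is a condensed, hedged sketch of it (the test itself, the pod object, and the "started" reason are illustrative, and the timeout guard is our addition):

package record_test

import (
	"testing"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
)

// Hypothetical test: observe what a recorder emits by attaching an event watcher.
func TestRecorderEmitsEvent(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet"})

	seen := make(chan *api.Event, 1)
	watcher := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		seen <- e
	})
	defer watcher.Stop()

	// Emit one event; the object must resolve to a valid api.ObjectReference.
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "default"}}
	recorder.Eventf(pod, "started", "Started pod %s", pod.Name)

	select {
	case e := <-seen:
		if e.Reason != "started" {
			t.Errorf("expected reason %q, got %q", "started", e.Reason)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("timed out waiting for event")
	}
}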
Example 2: Create
// Create constructs a BuildController
func (factory *BuildControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildController := &buildcontroller.BuildController{
		BuildUpdater:      factory.BuildUpdater,
		ImageStreamClient: client,
		PodManager:        client,
		BuildStrategy: &typeBasedFactoryStrategy{
			DockerBuildStrategy: factory.DockerBuildStrategy,
			SourceBuildStrategy: factory.SourceBuildStrategy,
			CustomBuildStrategy: factory.CustomBuildStrategy,
		},
		Recorder:         eventBroadcaster.NewRecorder(kapi.EventSource{Component: "build-controller"}),
		OpenshiftEnabled: factory.OpenshiftEnabled,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			limitedLogAndRetry(factory.BuildUpdater, 30*time.Minute),
			kutil.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			build := obj.(*buildapi.Build)
			return buildController.HandleBuild(build)
		},
	}
}
Author: pombredanne, Project: atomic-enterprise, Lines of code: 35, Source file: factory.go
Example 3: Run
// Run runs the specified SchedulerServer. This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(kubeClient)
	config, err := s.createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	config.Cloud, err = cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	sched := scheduler.New(config)
	sched.Run()

	select {}
}
Author: ravigadde, Project: kube-scheduler, Lines of code: 60, Source file: server.go
Example 4: RunKubelet
// RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications:
// 1 Integration tests
// 2 Kubelet binary
// 3 Standalone 'kubernetes' binary
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error {
	kcfg.Hostname = nodeutil.GetHostname(kcfg.HostnameOverride)

	if len(kcfg.NodeName) == 0 {
		// Query the cloud provider for our node name, default to Hostname
		nodeName := kcfg.Hostname
		if kcfg.Cloud != nil {
			var err error
			instances, ok := kcfg.Cloud.Instances()
			if !ok {
				return fmt.Errorf("failed to get instances from cloud provider")
			}
			nodeName, err = instances.CurrentNodeName(kcfg.Hostname)
			if err != nil {
				return fmt.Errorf("error fetching current instance name from cloud provider: %v", err)
			}
			glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)
		}
		kcfg.NodeName = nodeName
	}

	eventBroadcaster := record.NewBroadcaster()
	kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.NodeName})
	eventBroadcaster.StartLogging(glog.V(3).Infof)
	if kcfg.KubeClient != nil {
		glog.V(4).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
	} else {
		glog.Warning("No api server defined - no events will be sent to API server.")
	}

	capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources)
	credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory)

	if builder == nil {
		builder = createAndInitKubelet
	}
	if kcfg.OSInterface == nil {
		kcfg.OSInterface = kubecontainer.RealOS{}
	}
	k, podCfg, err := builder(kcfg)
	if err != nil {
		return fmt.Errorf("failed to create kubelet: %v", err)
	}

	// process pods and exit.
	if kcfg.Runonce {
		if _, err := k.RunOnce(podCfg.Updates()); err != nil {
			return fmt.Errorf("runonce failed: %v", err)
		}
		glog.Infof("Started kubelet as runonce")
	} else {
		startKubelet(k, podCfg, kcfg)
		glog.Infof("Started kubelet")
	}
	return nil
}
Author: nail-lian, Project: kubernetes, Lines of code: 64, Source file: server.go
Example 5: Create
// Create creates a DeploymentConfigChangeController.
func (factory *DeploymentConfigChangeControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	changeController := &DeploymentConfigChangeController{
		changeStrategy: &changeStrategyImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Generate(name)
			},
			updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Update(config)
			},
		},
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return changeController.Handle(config)
		},
	}
}
Author: cjnygard, Project: origin, Lines of code: 57, Source file: factory.go
Example 6: NewPluginConfig
func (k *KubernetesScheduler) NewPluginConfig(terminate <-chan struct{}, mux *http.ServeMux,
	podsWatcher *cache.ListWatch) *PluginConfig {

	// Watch and queue pods that need scheduling.
	updates := make(chan queue.Entry, k.schedcfg.UpdatesBacklog)
	podUpdates := &podStoreAdapter{queue.NewHistorical(updates)}
	reflector := cache.NewReflector(podsWatcher, &api.Pod{}, podUpdates, 0)

	// lock that guards critical sections that involve transferring pods from
	// the store (cache) to the scheduling queue; its purpose is to maintain
	// an ordering (vs interleaving) of operations that's easier to reason about.
	kapi := &k8smScheduler{internal: k}
	q := newQueuer(podUpdates)
	podDeleter := &deleter{
		api: kapi,
		qr:  q,
	}
	eh := &errorHandler{
		api:     kapi,
		backoff: backoff.New(k.schedcfg.InitialPodBackoff.Duration, k.schedcfg.MaxPodBackoff.Duration),
		qr:      q,
	}
	startLatch := make(chan struct{})
	eventBroadcaster := record.NewBroadcaster()
	runtime.On(startLatch, func() {
		eventBroadcaster.StartRecordingToSink(k.client.Events(""))
		reflector.Run() // TODO(jdef) should listen for termination
		podDeleter.Run(updates, terminate)
		q.Run(terminate)

		q.installDebugHandlers(mux)
		podtask.InstallDebugHandlers(k.taskRegistry, mux)
	})
	return &PluginConfig{
		Config: &plugin.Config{
			MinionLister: nil,
			Algorithm: &kubeScheduler{
				api:                      kapi,
				podUpdates:               podUpdates,
				defaultContainerCPULimit: k.defaultContainerCPULimit,
				defaultContainerMemLimit: k.defaultContainerMemLimit,
			},
			Binder:   &binder{api: kapi},
			NextPod:  q.yield,
			Error:    eh.handleSchedulingError,
			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
		},
		api:      kapi,
		client:   k.client,
		qr:       q,
		deleter:  podDeleter,
		starting: startLatch,
	}
}
Author: varunkumar09, Project: kubernetes, Lines of code: 54, Source file: plugin.go
Example 7: RunScheduler
// RunScheduler starts the Kubernetes scheduler
func (c *MasterConfig) RunScheduler() {
	config, err := c.createSchedulerConfig()
	if err != nil {
		glog.Fatalf("Unable to start scheduler: %v", err)
	}
	eventcast := record.NewBroadcaster()
	config.Recorder = eventcast.NewRecorder(kapi.EventSource{Component: "scheduler"})
	eventcast.StartRecordingToSink(c.KubeClient.Events(""))

	s := scheduler.New(config)
	s.Run()
	glog.Infof("Started Kubernetes Scheduler")
}
Author: dustintownsend, Project: origin, Lines of code: 14, Source file: master.go
Example 8: New
// New returns a new service controller to keep cloud provider service resources
// (like external load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient client.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})

	return &ServiceController{
		cloud:            cloud,
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
	}
}
Author: chenzhen411, Project: kubernetes, Lines of code: 16, Source file: servicecontroller.go
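Example 8 is the only constructor in this article that keeps the broadcaster itself on the returned struct instead of discarding it after wiring; keeping it around lets the controller attach further sinks or event watchers later in its lifecycle. Emitting through the stored recorder might then look like the following hedged fragment (the method, reason, and message are illustrative, not taken from the real ServiceController):

// Hypothetical method on ServiceController: record progress on an external
// load balancer, attributed to the "service-controller" EventSource above.
func (s *ServiceController) recordLoadBalancerCreate(service *api.Service) {
	s.eventRecorder.Eventf(service, "creating loadbalancer",
		"Creating load balancer for %s/%s", service.Namespace, service.Name)
}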
Example 9: NewReplicationManager
// NewReplicationManager creates a new ReplicationManager.
func NewReplicationManager(kubeClient client.Interface) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: RealPodControl{
			kubeClient: kubeClient,
			recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
		},
	}
	rm.syncHandler = rm.syncReplicationController
	return rm
}
Author: SivagnanamCiena, Project: calico-kubernetes, Lines of code: 16, Source file: replication_controller.go
Example 10: TestUnschedulableNodes
func TestUnschedulableNodes(t *testing.T) {
	helper, err := framework.NewHelper()
	if err != nil {
		t.Fatalf("Couldn't create etcd helper: %v", err)
	}
	framework.DeleteAllEtcdKeys()

	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	m = master.New(&master.Config{
		EtcdHelper:            helper,
		KubeletClient:         client.FakeKubeletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableUISupport:       false,
		EnableIndex:           true,
		APIPrefix:             "/api",
		// Enable v1beta3 if we are testing that version.
		EnableV1Beta3:    testapi.Version() == "v1beta3",
		Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admit.NewAlwaysAdmit(),
	})

	restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	schedulerConfigFactory := factory.NewConfigFactory(restClient)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
	scheduler.New(schedulerConfig).Run()
	defer close(schedulerConfig.StopEverything)

	DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
}
Author: EricCheung3, Project: kubernetes, Lines of code: 43, Source file: scheduler_test.go
Example 11: NewNodeController
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
	cloud cloudprovider.Interface,
	kubeClient client.Interface,
	registerRetryCount int,
	podEvictionTimeout time.Duration,
	podEvictor *PodEvictor,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterCIDR *net.IPNet,
	allocateNodeCIDRs bool) *NodeController {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	eventBroadcaster.StartLogging(glog.Infof)
	if kubeClient != nil {
		glog.Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	} else {
		glog.Infof("No api server defined - no events will be sent to API server.")
	}
	if allocateNodeCIDRs && clusterCIDR == nil {
		glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
	}
	return &NodeController{
		cloud:                  cloud,
		kubeClient:             kubeClient,
		recorder:               recorder,
		registerRetryCount:     registerRetryCount,
		podEvictionTimeout:     podEvictionTimeout,
		podEvictor:             podEvictor,
		nodeStatusMap:          make(map[string]nodeStatusData),
		nodeMonitorGracePeriod: nodeMonitorGracePeriod,
		nodeMonitorPeriod:      nodeMonitorPeriod,
		nodeStartupGracePeriod: nodeStartupGracePeriod,
		lookupIP:               net.LookupIP,
		now:                    util.Now,
		clusterCIDR:            clusterCIDR,
		allocateNodeCIDRs:      allocateNodeCIDRs,
	}
}
Author: Ima8, Project: kubernetes, Lines of code: 41, Source file: nodecontroller.go
Example 12: NewNodeController
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
	cloud cloudprovider.Interface,
	matchRE string,
	nodes []string,
	staticResources *api.NodeResources,
	kubeClient client.Interface,
	registerRetryCount int,
	podEvictionTimeout time.Duration,
	deletingPodsRateLimiter util.RateLimiter,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterName string) *NodeController {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	if kubeClient != nil {
		glog.Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	} else {
		glog.Infof("No api server defined - no events will be sent to API server.")
	}
	return &NodeController{
		cloud:                   cloud,
		matchRE:                 matchRE,
		nodes:                   nodes,
		staticResources:         staticResources,
		kubeClient:              kubeClient,
		recorder:                recorder,
		registerRetryCount:      registerRetryCount,
		podEvictionTimeout:      podEvictionTimeout,
		deletingPodsRateLimiter: deletingPodsRateLimiter,
		nodeStatusMap:           make(map[string]nodeStatusData),
		nodeMonitorGracePeriod:  nodeMonitorGracePeriod,
		nodeMonitorPeriod:       nodeMonitorPeriod,
		nodeStartupGracePeriod:  nodeStartupGracePeriod,
		lookupIP:                net.LookupIP,
		now:                     util.Now,
		clusterName:             clusterName,
	}
}
Author: SivagnanamCiena, Project: calico-kubernetes, Lines of code: 41, Source file: nodecontroller.go
Example 13: RunKubelet
// RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications:
// 1 Integration tests
// 2 Kubelet binary
// 3 Standalone 'kubernetes' binary
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error {
	kcfg.Hostname = util.GetHostname(kcfg.HostnameOverride)

	eventBroadcaster := record.NewBroadcaster()
	kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.Hostname})
	eventBroadcaster.StartLogging(glog.V(3).Infof)
	if kcfg.KubeClient != nil {
		glog.V(4).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
	} else {
		glog.Warning("No api server defined - no events will be sent to API server.")
	}

	capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources)
	credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory)

	if builder == nil {
		builder = createAndInitKubelet
	}
	if kcfg.OSInterface == nil {
		kcfg.OSInterface = kubecontainer.RealOS{}
	}
	k, podCfg, err := builder(kcfg)
	if err != nil {
		return fmt.Errorf("failed to create kubelet: %v", err)
	}

	// process pods and exit.
	if kcfg.Runonce {
		if _, err := k.RunOnce(podCfg.Updates()); err != nil {
			return fmt.Errorf("runonce failed: %v", err)
		}
		glog.Infof("Started kubelet as runonce")
	} else {
		startKubelet(k, podCfg, kcfg)
		glog.Infof("Started kubelet")
	}
	return nil
}
Author: cjnygard, Project: origin, Lines of code: 42, Source file: server.go
Example 14: startComponents
func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (string, string) {
	// Setup
	servers := []string{}
	glog.Infof("Creating etcd client pointing to %v", servers)

	handler := delegateHandler{}
	apiServer := httptest.NewServer(&handler)

	etcdClient := etcd.NewClient(servers)
	sleep := 4 * time.Second
	ok := false
	for i := 0; i < 3; i++ {
		keys, err := etcdClient.Get("/", false, false)
		if err != nil {
			glog.Warningf("Unable to list root etcd keys: %v", err)
			if i < 2 {
				time.Sleep(sleep)
				sleep = sleep * sleep
			}
			continue
		}
		for _, node := range keys.Node.Nodes {
			if _, err := etcdClient.Delete(node.Key, true); err != nil {
				glog.Fatalf("Unable delete key: %v", err)
			}
		}
		ok = true
		break
	}
	if !ok {
		glog.Fatalf("Failed to connect to etcd")
	}

	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: apiVersion})

	etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, latest.Version, etcdtest.PathPrefix())
	if err != nil {
		glog.Fatalf("Unable to get etcd storage: %v", err)
	}
	expEtcdStorage, err := master.NewEtcdStorage(etcdClient, explatest.InterfacesFor, explatest.Version, etcdtest.PathPrefix())
	if err != nil {
		glog.Fatalf("Unable to get etcd storage for experimental: %v", err)
	}

	// Master
	host, port, err := net.SplitHostPort(strings.TrimLeft(apiServer.URL, "http://"))
	if err != nil {
		glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
	}
	portNumber, err := strconv.Atoi(port)
	if err != nil {
		glog.Fatalf("Nonnumeric port? %v", err)
	}
	publicAddress := net.ParseIP(host)
	if publicAddress == nil {
		glog.Fatalf("no public address for %s", host)
	}

	// Create a master and install handlers into mux.
	m := master.New(&master.Config{
		DatabaseStorage:       etcdStorage,
		ExpDatabaseStorage:    expEtcdStorage,
		KubeletClient:         fakeKubeletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableProfiling:       true,
		APIPrefix:             "/api",
		ExpAPIPrefix:          "/experimental",
		Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:      admit.NewAlwaysAdmit(),
		ReadWritePort:         portNumber,
		PublicAddress:         publicAddress,
		CacheTimeout:          2 * time.Second,
	})
	handler.delegate = m.Handler

	// Scheduler
	schedulerConfigFactory := factory.NewConfigFactory(cl, nil)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		glog.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(cl.Events(""))
	scheduler.New(schedulerConfig).Run()

	endpoints := endpointcontroller.NewEndpointController(cl)
	// ensure the service endpoints are sync'd several times within the window that the integration tests wait
	go endpoints.Run(3, util.NeverStop)

	controllerManager := replicationControllerPkg.NewReplicationManager(cl, replicationControllerPkg.BurstReplicas)

	// TODO: Write an integration test for the replication controllers watch.
	go controllerManager.Run(3, util.NeverStop)

	nodeController := nodecontroller.NewNodeController(nil, cl, 5*time.Minute, nodecontroller.NewPodEvictor(util.NewFakeRateLimiter()),
		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
	//......... part of the code omitted here .........
Author: Bazooki, Project: kubernetes, Lines of code: 101, Source file: integration.go
Example 15: TestScheduler
func TestScheduler(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	defer eventBroadcaster.StartLogging(t.Logf).Stop()
	errS := errors.New("scheduler")
	errB := errors.New("binder")

	table := []struct {
		injectBindError  error
		sendPod          *api.Pod
		algo             scheduler.Scheduler
		expectErrorPod   *api.Pod
		expectAssumedPod *api.Pod
		expectError      error
		expectBind       *api.Binding
		eventReason      string
	}{
		{
			sendPod:          podWithID("foo", ""),
			algo:             mockScheduler{"machine1", nil},
			expectBind:       &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: "machine1"}},
			expectAssumedPod: podWithID("foo", "machine1"),
			eventReason:      "scheduled",
		}, {
			sendPod:        podWithID("foo", ""),
			algo:           mockScheduler{"machine1", errS},
			expectError:    errS,
			expectErrorPod: podWithID("foo", ""),
			eventReason:    "failedScheduling",
		}, {
			sendPod:         podWithID("foo", ""),
			algo:            mockScheduler{"machine1", nil},
			expectBind:      &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: "machine1"}},
			injectBindError: errB,
			expectError:     errB,
			expectErrorPod:  podWithID("foo", ""),
			eventReason:     "failedScheduling",
		},
	}

	for i, item := range table {
		var gotError error
		var gotPod *api.Pod
		var gotAssumedPod *api.Pod
		var gotBinding *api.Binding
		c := &Config{
			Modeler: &FakeModeler{
				AssumePodFunc: func(pod *api.Pod) {
					gotAssumedPod = pod
				},
			},
			MinionLister: scheduler.FakeMinionLister(
				api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
			),
			Algorithm: item.algo,
			Binder: fakeBinder{func(b *api.Binding) error {
				gotBinding = b
				return item.injectBindError
			}},
			Error: func(p *api.Pod, err error) {
				gotPod = p
				gotError = err
			},
			NextPod: func() *api.Pod {
				return item.sendPod
			},
			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
		}
		s := New(c)
		called := make(chan struct{})
		events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
			if e, a := item.eventReason, e.Reason; e != a {
				t.Errorf("%v: expected %v, got %v", i, e, a)
			}
			close(called)
		})
		s.scheduleOne()
		if e, a := item.expectAssumedPod, gotAssumedPod; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: assumed pod: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectErrorPod, gotPod; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error pod: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectError, gotError; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectBind, gotBinding; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error: %s", i, util.ObjectDiff(e, a))
		}
		<-called
		events.Stop()
	}
}
Author: SivagnanamCiena, Project: calico-kubernetes, Lines of code: 92, Source file: scheduler_test.go
Example 16: TestSchedulerForgetAssumedPodAfterDelete
func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	defer eventBroadcaster.StartLogging(t.Logf).Stop()

	// Setup modeler so we control the contents of all 3 stores: assumed,
	// scheduled and queued
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{scheduledPodStore}

	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{queuedPodStore}

	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	// Create a fake clock used to timestamp entries and calculate ttl. Nothing
	// will expire till we flip to something older than the ttl, at which point
	// all entries inserted with fakeTime will expire.
	ttl := 30 * time.Second
	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	fakeClock := &util.FakeClock{fakeTime}
	ttlPolicy := &cache.TTLPolicy{ttl, fakeClock}
	assumedPodsStore := cache.NewFakeExpirationStore(
		cache.MetaNamespaceKeyFunc, nil, ttlPolicy, fakeClock)
	modeler.assumedPods = &cache.StoreToPodLister{assumedPodsStore}

	// Port is the easiest way to cause a fit predicate failure
	podPort := 8080
	firstPod := podWithPort("foo", "", podPort)

	// Create the scheduler config
	algo := scheduler.NewGenericScheduler(
		map[string]scheduler.FitPredicate{"PodFitsPorts": scheduler.PodFitsPorts},
		[]scheduler.PriorityConfig{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	var gotBinding *api.Binding
	c := &Config{
		Modeler: modeler,
		MinionLister: scheduler.FakeMinionLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			scheduledPodStore.Add(podWithPort(b.Name, b.Target.Name, podPort))
			gotBinding = b
			return nil
		}},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
	}

	// First scheduling pass should schedule the pod
	s := New(c)
	called := make(chan struct{})
	events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	queuedPodStore.Add(firstPod)
	// queuedPodStore: [foo:8080]
	// scheduledPodStore: []
	// assumedPods: []

	s.scheduleOne()

	// queuedPodStore: []
	// scheduledPodStore: [foo:8080]
	// assumedPods: [foo:8080]

	pod, exists, _ := scheduledPodStore.GetByKey("foo")
	if !exists {
		t.Errorf("Expected scheduled pod store to contain pod")
	}
	pod, exists, _ = queuedPodStore.GetByKey("foo")
	if exists {
		t.Errorf("Did not expect a queued pod, found %+v", pod)
	}
	pod, exists, _ = assumedPodsStore.GetByKey("foo")
	if !exists {
		t.Errorf("Assumed pod store should contain stale pod")
	}

	expectBind := &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
	}

	<-called
	events.Stop()
	//......... part of the code omitted here .........
Author: SivagnanamCiena, Project: calico-kubernetes, Lines of code: 101, Source file: scheduler_test.go
Example 17: Create
// Create creates a DeploymentConfigController.
func (factory *DeploymentConfigControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	configController := &DeploymentConfigController{
		deploymentClient: &deploymentClientImpl{
			createDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Create(deployment)
			},
			listDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
				sel := deployutil.ConfigSelector(configName)
				list, err := factory.KubeClient.ReplicationControllers(namespace).List(sel)
				if err != nil {
					return nil, err
				}
				return list, nil
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Update(deployment)
			},
		},
		makeDeployment: func(config *deployapi.DeploymentConfig) (*kapi.ReplicationController, error) {
			return deployutil.MakeDeployment(config, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				config := obj.(*deployapi.DeploymentConfig)
				// no retries for a fatal error
				if _, isFatal := err.(fatalError); isFatal {
					glog.V(4).Infof("Will not retry fatal error for deploymentConfig %s/%s: %v", config.Namespace, config.Name, err)
					kutil.HandleError(err)
					return false
				}
				// infinite retries for a transient error
				if _, isTransient := err.(transientError); isTransient {
					glog.V(4).Infof("Retrying deploymentConfig %s/%s with error: %v", config.Namespace, config.Name, err)
					return true
				}
				kutil.HandleError(err)
				// no retries for anything else
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return configController.Handle(config)
		},
	}
}
Author: nstrug, Project: origin, Lines of code: 72, Source file: factory.go
Example 18: NewReplicationManager
// NewReplicationManager creates a new ReplicationManager.
func NewReplicationManager(kubeClient client.Interface, burstReplicas int) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewControllerExpectations(),
		queue:         workqueue.New(),
	}

	rm.rcStore.Store, rm.rcController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return rm.kubeClient.ReplicationControllers(api.NamespaceAll).List(labels.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return rm.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ReplicationController{},
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				// We only really need to do this when spec changes, but for correctness it is safer to
				// periodically double check. It is overkill for 2 reasons:
				// 1. Status.Replica updates will cause a sync
				// 2. Every 30s we will get a full resync (this will happen anyway every 5 minutes when pods relist)
				// However, it shouldn't be that bad as rcs that haven't met expectations won't sync, and all
				// the listing is done using local stores.
				oldRC := old.(*api.ReplicationController)
				curRC := cur.(*api.ReplicationController)
				if oldRC.Status.Replicas != curRC.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for rc: %v, %d->%d", curRC.Name, oldRC.Status.Replicas, curRC.Status.Replicas)
				}
				rm.enqueueController(cur)
			},
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rm.enqueueController,
		},
	)

	rm.podStore.Store, rm.podController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return rm.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return rm.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Pod{},
		PodRelistPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.addPod,
			// This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill
			// the most frequent pod update is status, and the associated rc will only list from local storage, so
			// it should be ok.
			UpdateFunc: rm.updatePod,
			DeleteFunc: rm.deletePod,
		},
	)

	rm.syncHandler = rm.syncReplicationController
	rm.podStoreSynced = rm.podController.HasSynced
	return rm
}
Author: newstatusflowtesting, Project: kubernetes, Lines of code: 76, Source file: replication_controller.go
Note: the github.com/GoogleCloudPlatform/kubernetes/pkg/client/record.NewBroadcaster examples in this article are collected from GitHub, MSDocs, and similar source-code and documentation platforms. The snippets were selected from open-source projects contributed by the community, and the copyright of each snippet belongs to its original author; distribution and use are subject to the corresponding project's license. Do not reproduce without permission.