This article collects typical usage examples of the Golang function github.com/GoogleCloudPlatform/kubernetes/pkg/api.NewDeleteOptions. If you have been wondering what NewDeleteOptions does, how to call it, or where to find real-world usage, the hand-picked code examples below should help.
Eight code examples of the NewDeleteOptions function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Golang code examples.
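Before the examples, here is a minimal sketch of what the function returns, assuming the historical definition in pkg/api (where NewDeleteOptions wraps a grace period, in seconds, into a *api.DeleteOptions whose GracePeriodSeconds field is a pointer). This is an illustrative snippet, not part of any example below:

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
)

func main() {
	// Grace period of 0: ask the server to delete the object immediately.
	immediate := api.NewDeleteOptions(0)

	// Grace period of 30: give the object (e.g. a pod) 30 seconds to shut down.
	graceful := api.NewDeleteOptions(30)

	fmt.Println(*immediate.GracePeriodSeconds, *graceful.GracePeriodSeconds) // 0 30
}

Most of the examples below pass NewDeleteOptions(0) to force an immediate deletion, typically in tests or controllers that clean up pods.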
Example 1: ReapResult
func ReapResult(r *resource.Result, f *cmdutil.Factory, out io.Writer, isDefaultDelete, ignoreNotFound bool, timeout time.Duration, gracePeriod int, shortOutput bool, mapper meta.RESTMapper) error {
	found := 0
	if ignoreNotFound {
		r = r.IgnoreErrors(errors.IsNotFound)
	}
	err := r.Visit(func(info *resource.Info) error {
		found++
		reaper, err := f.Reaper(info.Mapping)
		if err != nil {
			// If there is no reaper for this resource and the user didn't explicitly
			// ask for stop, fall back to a plain delete.
			if kubectl.IsNoSuchReaperError(err) && isDefaultDelete {
				return deleteResource(info, out, shortOutput, mapper)
			}
			return cmdutil.AddSourceToErr("reaping", info.Source, err)
		}
		var options *api.DeleteOptions
		if gracePeriod >= 0 {
			options = api.NewDeleteOptions(int64(gracePeriod))
		}
		if _, err := reaper.Stop(info.Namespace, info.Name, timeout, options); err != nil {
			return cmdutil.AddSourceToErr("stopping", info.Source, err)
		}
		cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "deleted")
		return nil
	})
	if err != nil {
		return err
	}
	if found == 0 {
		fmt.Fprintf(out, "No resources found\n")
	}
	return nil
}
Developer ID: newstatusflowtesting, Project: kubernetes, Lines of code: 33, Source file: delete.go
Example 2: checkExistingRCRecovers
func checkExistingRCRecovers(f Framework) {
	By("assert that the pre-existing replication controller recovers")
	podClient := f.Client.Pods(f.Namespace.Name)
	rcSelector := labels.Set{"name": "baz"}.AsSelector()

	By("deleting pods from existing replication controller")
	expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
		pods, err := podClient.List(rcSelector, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		if len(pods.Items) == 0 {
			return false, nil
		}
		for _, pod := range pods.Items {
			err = podClient.Delete(pod.Name, api.NewDeleteOptions(0))
			Expect(err).NotTo(HaveOccurred())
		}
		return true, nil
	}))

	By("waiting for replication controller to recover")
	expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
		pods, err := podClient.List(rcSelector, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		for _, pod := range pods.Items {
			if api.IsPodReady(&pod) {
				return true, nil
			}
		}
		return false, nil
	}))
}
Developer ID: mbforbes, Project: kubernetes, Lines of code: 31, Source file: etcd_failure.go
Example 3: TestDeleteNoGraceful
func (t *Tester) TestDeleteNoGraceful(createFn func() runtime.Object, wasGracefulFn func() bool) {
	existing := createFn()
	objectMeta, err := api.ObjectMetaFor(existing)
	if err != nil {
		t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, existing)
	}

	ctx := api.WithNamespace(t.TestContext(), objectMeta.Namespace)
	_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(10))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if _, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name); !errors.IsNotFound(err) {
		t.Errorf("unexpected error, object should not exist: %v", err)
	}
	if wasGracefulFn() {
		t.Errorf("resource should not support graceful delete")
	}
}
Developer ID: eghobo, Project: kubedash, Lines of code: 18, Source file: resttest.go
Example 4: Delete
// Delete removes the item from etcd.
func (e *Etcd) Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}

	obj := e.NewFunc()
	trace := util.NewTrace("Delete " + reflect.TypeOf(obj).String())
	defer trace.LogIfLong(time.Second)
	trace.Step("About to read object")
	if err := e.Helper.ExtractObj(key, obj, false); err != nil {
		return nil, etcderr.InterpretDeleteError(err, e.EndpointName, name)
	}

	// support older consumers of delete by treating "nil" as delete immediately
	if options == nil {
		options = api.NewDeleteOptions(0)
	}
	graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, obj, options)
	if err != nil {
		return nil, err
	}
	if pendingGraceful {
		return e.finalizeDelete(obj, false)
	}
	if graceful && *options.GracePeriodSeconds != 0 {
		trace.Step("Graceful deletion")
		out := e.NewFunc()
		if err := e.Helper.SetObj(key, obj, out, uint64(*options.GracePeriodSeconds)); err != nil {
			return nil, etcderr.InterpretUpdateError(err, e.EndpointName, name)
		}
		return e.finalizeDelete(out, true)
	}

	// delete immediately, or no graceful deletion supported
	out := e.NewFunc()
	trace.Step("About to delete object")
	if err := e.Helper.DeleteObj(key, out); err != nil {
		return nil, etcderr.InterpretDeleteError(err, e.EndpointName, name)
	}
	return e.finalizeDelete(out, true)
}
Developer ID: brandon-adams, Project: origin, Lines of code: 43, Source file: etcd.go
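The branch on *options.GracePeriodSeconds in Example 4 is what separates a graceful delete (the object is kept with a TTL) from an immediate one. As a rough illustration, and ignoring the strategy consultation that rest.BeforeDelete performs, a hypothetical helper capturing just the nil-normalization and the grace-period check might look like this (not part of the original code):

package storage

import "github.com/GoogleCloudPlatform/kubernetes/pkg/api"

// isGracefulDelete is a hypothetical helper mirroring the logic in Example 4:
// nil options are normalized to an immediate delete, and only a non-zero
// grace period takes the graceful path.
func isGracefulDelete(options *api.DeleteOptions) bool {
	if options == nil {
		options = api.NewDeleteOptions(0)
	}
	return options.GracePeriodSeconds != nil && *options.GracePeriodSeconds != 0
}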
Example 5: TestEtcdDeletePodMultipleContainers
func TestEtcdDeletePodMultipleContainers(t *testing.T) {
	registry, _, _, fakeClient, _ := newStorage(t)
	ctx := api.NewDefaultContext()
	fakeClient.TestIndex = true
	key, _ := registry.KeyFunc(ctx, "foo")
	fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Spec:       api.PodSpec{Host: "machine"},
	}), 0)

	_, err := registry.Delete(ctx, "foo", api.NewDeleteOptions(0))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(fakeClient.DeletedKeys) != 1 {
		t.Errorf("Expected 1 delete, found %#v", fakeClient.DeletedKeys)
	}
	if fakeClient.DeletedKeys[0] != key {
		t.Errorf("Unexpected key: %s, expected %s", fakeClient.DeletedKeys[0], key)
	}
}
Developer ID: SivagnanamCiena, Project: calico-kubernetes, Lines of code: 21, Source file: etcd_test.go
Example 6: runBuildCompletePodDeleteTest
func runBuildCompletePodDeleteTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), "0")
	if err != nil {
		t.Fatalf("Couldn't subscribe to Builds %v", err)
	}
	defer buildWatch.Stop()

	created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild())
	if err != nil {
		t.Fatalf("Couldn't create Build: %v", err)
	}

	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), created.ResourceVersion)
	if err != nil {
		t.Fatalf("Couldn't subscribe to Pods %v", err)
	}
	defer podWatch.Stop()

	// wait for initial build event from the creation of the imagerepo with tag latest
	event := waitForWatch(t, "initial build added", buildWatch)
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild := event.Object.(*buildapi.Build)

	// initial pod creation for build
	event = waitForWatch(t, "build pod created", podWatch)
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}

	event = waitForWatch(t, "build updated to pending", buildWatch)
	if e, a := watchapi.Modified, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild = event.Object.(*buildapi.Build)
	if newBuild.Status.Phase != buildapi.BuildPhasePending {
		t.Fatalf("expected build status to be marked pending, but was marked %s", newBuild.Status.Phase)
	}

	newBuild.Status.Phase = buildapi.BuildPhaseComplete
	clusterAdminClient.Builds(testutil.Namespace()).Update(newBuild)
	event = waitForWatch(t, "build updated to complete", buildWatch)
	if e, a := watchapi.Modified, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild = event.Object.(*buildapi.Build)
	if newBuild.Status.Phase != buildapi.BuildPhaseComplete {
		t.Fatalf("expected build status to be marked complete, but was marked %s", newBuild.Status.Phase)
	}

	clusterAdminKubeClient.Pods(testutil.Namespace()).Delete(buildutil.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0))
	time.Sleep(10 * time.Second)
	newBuild, err = clusterAdminClient.Builds(testutil.Namespace()).Get(newBuild.Name)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if newBuild.Status.Phase != buildapi.BuildPhaseComplete {
		t.Fatalf("build status was updated to %s after deleting pod, should have stayed as %s", newBuild.Status.Phase, buildapi.BuildPhaseComplete)
	}
}
Developer ID: Risar, Project: origin, Lines of code: 63, Source file: buildcontroller_test.go
Example 7: runBuildRunningPodDeleteTest
func runBuildRunningPodDeleteTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), "0")
	if err != nil {
		t.Fatalf("Couldn't subscribe to Builds %v", err)
	}
	defer buildWatch.Stop()

	created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild())
	if err != nil {
		t.Fatalf("Couldn't create Build: %v", err)
	}

	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), created.ResourceVersion)
	if err != nil {
		t.Fatalf("Couldn't subscribe to Pods %v", err)
	}
	defer podWatch.Stop()

	// wait for initial build event from the creation of the imagerepo with tag latest
	event := waitForWatch(t, "initial build added", buildWatch)
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild := event.Object.(*buildapi.Build)

	// initial pod creation for build
	event = waitForWatch(t, "build pod created", podWatch)
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}

	event = waitForWatch(t, "build updated to pending", buildWatch)
	if e, a := watchapi.Modified, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild = event.Object.(*buildapi.Build)
	if newBuild.Status != buildapi.BuildStatusPending {
		t.Fatalf("expected build status to be marked pending, but was marked %s", newBuild.Status)
	}

	clusterAdminKubeClient.Pods(testutil.Namespace()).Delete(buildutil.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0))
	event = waitForWatch(t, "build updated to error", buildWatch)
	if e, a := watchapi.Modified, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild = event.Object.(*buildapi.Build)
	if newBuild.Status != buildapi.BuildStatusError {
		t.Fatalf("expected build status to be marked error, but was marked %s", newBuild.Status)
	}
}
Developer ID: cjnygard, Project: origin, Lines of code: 51, Source file: buildcontroller_test.go
Example 8: Create
// Create creates a DeployerPodController.
func (factory *DeployerPodControllerFactory) Create() controller.RunnableController {
	deploymentLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).List(labels.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	deploymentStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentLW, &kapi.ReplicationController{}, deploymentStore, 2*time.Minute).Run()

	// TODO: These should be filtered somehow to include only the primary
	// deployer pod. For now, the controller is filtering.
	// TODO: Even with the label selector, this is inefficient on the backend
	// and we should work to consolidate namespace-spanning pod watches. For
	// example, the build infra is also watching pods across namespaces.
	podLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.KubeClient.Pods(kapi.NamespaceAll).List(deployutil.AnyDeployerPodSelector(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.KubeClient.Pods(kapi.NamespaceAll).Watch(deployutil.AnyDeployerPodSelector(), fields.Everything(), resourceVersion)
		},
	}
	podQueue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &kapi.Pod{}, podQueue, 2*time.Minute).Run()

	podController := &DeployerPodController{
		deploymentClient: &deploymentClientImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				// Try to use the cache first. Trust hits and return them.
				example := &kapi.ReplicationController{ObjectMeta: kapi.ObjectMeta{Namespace: namespace, Name: name}}
				cached, exists, err := deploymentStore.Get(example)
				if err == nil && exists {
					return cached.(*kapi.ReplicationController), nil
				}
				// Double-check with the master for cache misses/errors, since those
				// are rare and API calls are expensive but more reliable.
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Update(deployment)
			},
			listDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
				return factory.KubeClient.ReplicationControllers(namespace).List(deployutil.ConfigSelector(configName))
			},
		},
		deployerPodsFor: func(namespace, name string) (*kapi.PodList, error) {
			return factory.KubeClient.Pods(namespace).List(deployutil.DeployerPodSelector(name), fields.Everything())
		},
		deletePod: func(namespace, name string) error {
			return factory.KubeClient.Pods(namespace).Delete(name, kapi.NewDeleteOptions(0))
		},
	}

	return &controller.RetryController{
		Queue: podQueue,
		RetryManager: controller.NewQueueRetryManager(
			podQueue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				// infinite retries for a transient error
				if _, isTransient := err.(transientError); isTransient {
					return true
				}
				// no retries for anything else
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			pod := obj.(*kapi.Pod)
			return podController.Handle(pod)
		},
	}
}
Developer ID: pombredanne, Project: atomic-enterprise, Lines of code: 82, Source file: factory.go
Note: The github.com/GoogleCloudPlatform/kubernetes/pkg/api.NewDeleteOptions examples in this article were collected from source code and documentation platforms such as GitHub/MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.