Python kubernetes.client.V1Container() Examples
The following are 30 code examples of kubernetes.client.V1Container(), collected from open-source projects and ordered by popularity (vote count). The originating project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the kubernetes.client module.
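Before the examples, here is a minimal construction sketch (not taken from any of the projects below; the image and values are illustrative). The only constructor argument the V1Container model itself requires is name; image, command, args, env, and ports are common optional keyword arguments:

from kubernetes import client

# Minimal V1Container: `name` is the only required field of the model.
container = client.V1Container(
    name="example",
    image="busybox:1.36",  # illustrative image
    command=["sh", "-c", "echo hello"],
    env=[client.V1EnvVar(name="GREETING", value="hello")],
    ports=[client.V1ContainerPort(container_port=8080)],
)
print(container.name, container.image)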
Example #1
Source File: copy_dataset.py From aws-eks-deep-learning-benchmark with Apache License 2.0 | 7 votes |
def create_job_object(runner_image, region, s3_path, pvc_name):
    target_folder = get_target_folder(s3_path)

    # Configure the Pod template container
    container = k8s_client.V1Container(
        name="copy-dataset-worker",
        image=runner_image,
        command=["aws"],
        args=["s3", "sync", s3_path, "/mnt/" + target_folder],
        volume_mounts=[k8s_client.V1VolumeMount(name="data-storage", mount_path='/mnt')],
        env=[
            k8s_client.V1EnvVar(name="AWS_REGION", value=region),
            k8s_client.V1EnvVar(
                name="AWS_ACCESS_KEY_ID",
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        key="AWS_ACCESS_KEY_ID", name="aws-secret"))),
            k8s_client.V1EnvVar(
                name="AWS_SECRET_ACCESS_KEY",
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        key="AWS_SECRET_ACCESS_KEY", name="aws-secret"))),
        ],
    )
    volume = k8s_client.V1Volume(
        name='data-storage',
        persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name))
    # Create and configure a spec section
    template = k8s_client.V1PodTemplateSpec(
        # metadata=k8s_client.V1ObjectMeta(labels={"app": "copy-dataset-worker"}),
        spec=k8s_client.V1PodSpec(containers=[container], volumes=[volume], restart_policy="OnFailure"))
    # Create the specification of the job
    spec = k8s_client.V1JobSpec(
        # selector=k8s_client.V1LabelSelector(match_labels={"app": "copy-dataset-worker"}),
        template=template)
    # Instantiate the job object
    deployment = k8s_client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=k8s_client.V1ObjectMeta(name=container.name),
        spec=spec)
    return deployment
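The function above only builds the V1Job object; nothing is submitted to the cluster. A minimal sketch of how such an object is typically submitted through the batch API (the namespace and argument values are illustrative, not part of the original example):

from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config() when running inside a cluster
job = create_job_object(
    runner_image="runner:latest",      # illustrative arguments
    region="us-west-2",
    s3_path="s3://my-bucket/dataset",
    pvc_name="data-pvc",
)
client.BatchV1Api().create_namespaced_job(namespace="default", body=job)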
Example #2
Source File: kubernetes.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def create_job_manifest(envs, commands, name, image, template_file):
    if template_file is not None:
        with open(template_file) as f:
            job = yaml.safe_load(f)
            job["metadata"]["name"] = name
            job["spec"]["template"]["metadata"]["labels"]["app"] = name
            job["spec"]["template"]["spec"]["containers"][0]["image"] = image
            job["spec"]["template"]["spec"]["containers"][0]["command"] = commands
            job["spec"]["template"]["spec"]["containers"][0]["name"] = name
            job["spec"]["template"]["spec"]["containers"][0]["env"] = envs
    else:
        container = client.V1Container(image=image, command=commands, name=name, env=envs)
        pod_temp = client.V1PodTemplateSpec(
            spec=client.V1PodSpec(restart_policy="OnFailure", containers=[container]),
            metadata=client.V1ObjectMeta(name=name, labels={"app": name})
        )
        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            spec=client.V1JobSpec(template=pod_temp),
            metadata=client.V1ObjectMeta(name=name)
        )
    return job
Example #3
Source File: test_eks.py From coach with Apache License 2.0 | 6 votes |
def deploy(self):
    container = client.V1Container(
        name=self.test_name,
        image=self.image,
        command=['bash', '-c'],
        args=[self.test_command],
        image_pull_policy='Always',
        working_dir=self.working_dir,
        stdin=True,
        tty=True
    )
    pod_spec = client.V1PodSpec(
        containers=[container],
        restart_policy='Never'
    )
    pod = client.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=client.V1ObjectMeta(name=self.test_name),
        spec=pod_spec
    )
    try:
        self.corev1_api.create_namespaced_pod(self.namespace, pod)
    except client.rest.ApiException as e:
        print("Got exception: {} while creating a pod".format(e))
        return 1
    return 0
Example #4
Source File: translate_outputs.py From tacker with Apache License 2.0 | 6 votes |
def init_containers(self, container_props, limit_resource, name):
    list_env_var = self.init_envs(container_props, name)
    container_name = self.pre_process_name(container_props.name)
    list_container_port = list()
    if container_props.ports:
        for container_port in container_props.ports:
            port = int(container_port)
            cport = client.V1ContainerPort(container_port=port)
            list_container_port.append(cport)
    container = client.V1Container(
        name=container_name,
        image=container_props.image,
        ports=list_container_port,
        resources=limit_resource,
        command=container_props.command,
        args=container_props.args,
        env=list_env_var,
        image_pull_policy="IfNotPresent")
    return container
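The limit_resource argument is passed straight through to the container's resources field, so it is expected to be a V1ResourceRequirements object. A hedged sketch of building one (the request and limit values are illustrative):

from kubernetes import client

limit_resource = client.V1ResourceRequirements(
    requests={"cpu": "250m", "memory": "256Mi"},  # illustrative values
    limits={"cpu": "500m", "memory": "512Mi"},
)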
Example #5
Source File: _k8s_job_helper.py From pipelines with Apache License 2.0 | 6 votes |
def _create_k8s_job(self, yaml_spec):
    """_create_k8s_job creates a kubernetes job based on the yaml spec"""
    pod = k8s_client.V1Pod(
        metadata=k8s_client.V1ObjectMeta(
            generate_name=yaml_spec['metadata']['generateName'],
            annotations=yaml_spec['metadata']['annotations']))
    container = k8s_client.V1Container(
        name=yaml_spec['spec']['containers'][0]['name'],
        image=yaml_spec['spec']['containers'][0]['image'],
        args=yaml_spec['spec']['containers'][0]['args'])
    pod.spec = k8s_client.V1PodSpec(
        restart_policy=yaml_spec['spec']['restartPolicy'],
        containers=[container],
        service_account_name=yaml_spec['spec']['serviceAccountName'])
    try:
        api_response = self._corev1.create_namespaced_pod(yaml_spec['metadata']['namespace'], pod)
        return api_response.metadata.name, True
    except k8s_client.rest.ApiException as e:
        logging.exception("Exception when calling CoreV1Api->create_namespaced_pod: {}\n".format(str(e)))
        return '', False
Example #6
Source File: test_clusterinit.py From CPU-Manager-for-Kubernetes with Apache License 2.0 | 6 votes |
def test_clusterinit_update_pod_with_init_container():
    pod_passed = k8sclient.V1Pod(
        metadata=k8sclient.V1ObjectMeta(annotations={}),
        spec=k8sclient.V1PodSpec(containers=[
            k8sclient.V1Container(name="cmk")
        ]),
        status=k8sclient.V1PodStatus()).to_dict()
    cmd = "cmd"
    cmk_img = "cmk_img"
    cmk_img_pol = "policy"
    args = "argument"
    clusterinit.update_pod_with_init_container(pod_passed, cmd, cmk_img,
                                               cmk_img_pol, args)
    pods = json.loads(pod_passed["metadata"]["annotations"][
        "pod.beta.kubernetes.io/init-containers"])
    assert len(pods) == 1
    assert pods[0]["name"] == cmd
    assert pods[0]["image"] == cmk_img
    assert pods[0]["imagePullPolicy"] == cmk_img_pol
    assert args in pods[0]["args"]

    second_cmd = "cmd2"
    second_img = cmk_img
    second_img_pol = "Always"
    second_args = ["arg1", "arg2"]
    clusterinit.update_pod_with_init_container(pod_passed, second_cmd, second_img,
                                               second_img_pol, second_args)
    pods = json.loads(pod_passed["metadata"]["annotations"][
        "pod.beta.kubernetes.io/init-containers"])
    assert len(pods) == 2
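This test exercises the legacy pod.beta.kubernetes.io/init-containers annotation. On current clusters, init containers are declared as a typed field of the pod spec instead; a brief sketch under that assumption (the container names and image are illustrative, not from the project above):

from kubernetes import client

pod_spec = client.V1PodSpec(
    init_containers=[
        client.V1Container(
            name="cmk-init",        # illustrative init container
            image="cmk_img",
            command=["cmd"],
            args=["argument"],
        )
    ],
    containers=[client.V1Container(name="cmk")],
)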
Example #7
Source File: kubernetes.py From training_results_v0.6 with Apache License 2.0 | 6 votes |
def create_job_manifest(envs, commands, name, image, template_file):
    if template_file is not None:
        with open(template_file) as f:
            job = yaml.safe_load(f)
            job["metadata"]["name"] = name
            job["spec"]["template"]["metadata"]["labels"]["app"] = name
            job["spec"]["template"]["spec"]["containers"][0]["image"] = image
            job["spec"]["template"]["spec"]["containers"][0]["command"] = commands
            job["spec"]["template"]["spec"]["containers"][0]["name"] = name
            job["spec"]["template"]["spec"]["containers"][0]["env"] = envs
    else:
        container = client.V1Container(image=image, command=commands, name=name, env=envs)
        pod_temp = client.V1PodTemplateSpec(
            spec=client.V1PodSpec(restart_policy="OnFailure", containers=[container]),
            metadata=client.V1ObjectMeta(name=name, labels={"app": name})
        )
        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            spec=client.V1JobSpec(template=pod_temp),
            metadata=client.V1ObjectMeta(name=name)
        )
    return job
Example #8
Source File: job_crud.py From python with Apache License 2.0 | 6 votes |
def create_job_object():
    # Configure the Pod template container
    container = client.V1Container(
        name="pi",
        image="perl",
        command=["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"])
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "pi"}),
        spec=client.V1PodSpec(restart_policy="Never", containers=[container]))
    # Create the specification of the job
    spec = client.V1JobSpec(
        template=template,
        backoff_limit=4)
    # Instantiate the job object
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(name=JOB_NAME),
        spec=spec)
    return job
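As with Example #1, only the V1Job object is built here; the rest of job_crud.py presumably creates and later deletes it through the batch API. A hedged sketch of those surrounding calls, reusing the JOB_NAME constant from the example (the namespace is illustrative):

from kubernetes import client, config

config.load_kube_config()
batch_v1 = client.BatchV1Api()
job = create_job_object()
batch_v1.create_namespaced_job(namespace="default", body=job)
# ...later, clean up the finished Job and its pods:
batch_v1.delete_namespaced_job(
    name=JOB_NAME,
    namespace="default",
    body=client.V1DeleteOptions(propagation_policy="Foreground"),
)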
Example #9
Source File: conftest.py From kubetest with GNU General Public License v3.0 | 5 votes |
def simple_daemonset():
    """Return the Kubernetes config matching the simple-daemonset.yaml manifest."""
    return client.V1DaemonSet(
        api_version='apps/v1',
        kind='DaemonSet',
        metadata=client.V1ObjectMeta(
            name='canal-daemonset',
            labels={'app': 'canal'}
        ),
        spec=client.V1DaemonSetSpec(
            selector=client.V1LabelSelector(
                match_labels={'app': 'canal'}
            ),
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(
                    labels={'app': 'canal'}
                ),
                spec=client.V1PodSpec(
                    containers=[
                        client.V1Container(
                            name='canal',
                            image='canal:3.7.2',
                            ports=[client.V1ContainerPort(container_port=9099)]
                        )
                    ]
                )
            )
        )
    )
Example #10
Source File: kubernetes.py From pytest-plugins with MIT License | 5 votes |
def _get_pod_spec(self):
    container = k8sclient.V1Container(
        name='fixture',
        image=self._image,
        command=self._get_cmd(),
        # Note: dict.iteritems() is Python 2 only; on Python 3 this would be .items().
        env=[k8sclient.V1EnvVar(name=k, value=v) for k, v in self._env.iteritems()],
    )
    return k8sclient.V1PodSpec(
        containers=[container]
    )
Example #11
Source File: test_manifest.py From kubetest with GNU General Public License v3.0 | 5 votes |
def test_bad_yaml(self, manifest_dir):
    """Test specifying a file that is not valid YAML."""
    with pytest.raises(yaml.YAMLError):
        manifest.load_type(
            client.V1Container,
            os.path.join(manifest_dir, 'invalid.yaml')
        )
Example #12
Source File: test_manifest.py From kubetest with GNU General Public License v3.0 | 5 votes |
def test_simple_persistentvolumeclaim_wrong_type(self, manifest_dir):
    """Test loading the simple persistentvolumeclaim to the wrong type."""
    with pytest.raises(ValueError):
        # The V1Container requires a name -- since the manifest has no name,
        # it will cause V1Container construction to fail with ValueError.
        manifest.load_type(
            client.V1Container,
            os.path.join(manifest_dir, 'simple-persistentvolumeclaim.yaml')
        )
Example #13
Source File: test_manifest.py From kubetest with GNU General Public License v3.0 | 5 votes |
def test_simple_replicaset_wrong_type(self, manifest_dir):
    """Test loading the simple ReplicaSet to the wrong type."""
    with pytest.raises(ValueError):
        # The V1Container requires a name -- since the manifest has no name,
        # it will cause V1Container construction to fail with ValueError.
        manifest.load_type(
            client.V1Container,
            os.path.join(manifest_dir, 'simple-replicaset.yaml')
        )
Example #14
Source File: test_manifest.py From kubetest with GNU General Public License v3.0 | 5 votes |
def test_simple_ingress_wrong_type(self, manifest_dir):
    """Test loading the simple ingress to the wrong type."""
    with pytest.raises(ValueError):
        # The V1Container requires a name -- since the manifest has no name,
        # it will cause V1Container construction to fail with ValueError.
        manifest.load_type(
            client.V1Container,
            os.path.join(manifest_dir, 'simple-ingress.yaml')
        )
Example #15
Source File: test_manifest.py From kubetest with GNU General Public License v3.0 | 5 votes |
def test_simple_service_wrong_type(self, manifest_dir):
    """Test loading the simple service to the wrong type."""
    with pytest.raises(ValueError):
        # The V1Container requires a name -- since the manifest has no name,
        # it will cause V1Container construction to fail with ValueError.
        manifest.load_type(
            client.V1Container,
            os.path.join(manifest_dir, 'simple-service.yaml')
        )
Example #16
Source File: test_manifest.py From kubetest with GNU General Public License v3.0 | 5 votes |
def test_simple_daemonset_wrong_type(self, manifest_dir):
    """Test loading the simple daemonset to the wrong type."""
    with pytest.raises(ValueError):
        # The V1Container requires a name -- since the manifest has no name,
        # it will cause V1Container construction to fail with ValueError.
        manifest.load_type(
            client.V1Container,
            os.path.join(manifest_dir, 'simple-daemonset.yaml')
        )
Example #17
Source File: test_manifest.py From kubetest with GNU General Public License v3.0 | 5 votes |
def test_simple_deployment_wrong_type(self, manifest_dir):
    """Test loading the simple deployment to the wrong type."""
    with pytest.raises(ValueError):
        # The V1Container requires a name -- since the manifest has no name,
        # it will cause V1Container construction to fail with ValueError.
        manifest.load_type(
            client.V1Container,
            os.path.join(manifest_dir, 'simple-deployment.yaml')
        )
Example #18
Source File: conftest.py From kubetest with GNU General Public License v3.0 | 5 votes |
def simple_replicaset():
    """Return the Kubernetes config matching the simple-replicaset.yaml manifest."""
    return client.V1ReplicaSet(
        api_version='apps/v1',
        kind='ReplicaSet',
        metadata=client.V1ObjectMeta(
            name='frontend',
            labels={
                'app': 'guestbook',
                'tier': 'frontend',
            },
        ),
        spec=client.V1ReplicaSetSpec(
            replicas=3,
            selector=client.V1LabelSelector(
                match_labels={'tier': 'frontend'},
            ),
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(
                    labels={'tier': 'frontend'},
                ),
                spec=client.V1PodSpec(
                    containers=[
                        client.V1Container(
                            name='php-redis',
                            image='gcr.io/google_samples/gb-frontend:v3',
                        ),
                    ],
                ),
            ),
        ),
    )
Example #19
Source File: kubernetes_tools.py From paasta with Apache License 2.0 | 5 votes |
def get_kubernetes_containers(
    self,
    docker_volumes: Sequence[DockerVolume],
    system_paasta_config: SystemPaastaConfig,
    aws_ebs_volumes: Sequence[AwsEbsVolume],
    service_namespace_config: ServiceNamespaceConfig,
) -> Sequence[V1Container]:
    service_container = V1Container(
        image=self.get_docker_url(),
        command=self.get_cmd(),
        args=self.get_args(),
        env=self.get_container_env(),
        resources=self.get_resource_requirements(),
        lifecycle=V1Lifecycle(
            pre_stop=V1Handler(
                _exec=V1ExecAction(command=["/bin/sh", "-c", "sleep 30"])
            )
        ),
        name=self.get_sanitised_instance_name(),
        liveness_probe=self.get_liveness_probe(service_namespace_config),
        ports=[V1ContainerPort(container_port=self.get_container_port())],
        security_context=self.get_security_context(),
        volume_mounts=self.get_volume_mounts(
            docker_volumes=docker_volumes,
            aws_ebs_volumes=aws_ebs_volumes,
            persistent_volumes=self.get_persistent_volumes(),
        ),
    )
    containers = [service_container] + self.get_sidecar_containers(  # type: ignore
        system_paasta_config=system_paasta_config,
        service_namespace_config=service_namespace_config,
    )
    return containers
Example #20
Source File: conftest.py From kubetest with GNU General Public License v3.0 | 5 votes |
def simple_statefulset():
    """Return the Kubernetes config matching the simple-statefulset.yaml manifest."""
    return client.V1StatefulSet(
        api_version='apps/v1',
        kind='StatefulSet',
        metadata=client.V1ObjectMeta(
            name='postgres-statefulset',
            labels={'app': 'postgres'}
        ),
        spec=client.V1StatefulSetSpec(
            replicas=3,
            selector=client.V1LabelSelector(
                match_labels={'app': 'postgres'}
            ),
            service_name='simple-service',
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(
                    labels={'app': 'postgres'}
                ),
                spec=client.V1PodSpec(
                    containers=[
                        client.V1Container(
                            name='postgres',
                            image='postgres:9.6',
                            ports=[client.V1ContainerPort(container_port=5432)]
                        )
                    ]
                )
            )
        )
    )
Example #21
Source File: conftest.py From kubetest with GNU General Public License v3.0 | 5 votes |
def simple_deployment():
    """Return the Kubernetes config matching the simple-deployment.yaml manifest."""
    return client.V1Deployment(
        api_version='apps/v1',
        kind='Deployment',
        metadata=client.V1ObjectMeta(
            name='nginx-deployment',
            labels={'app': 'nginx'}
        ),
        spec=client.V1DeploymentSpec(
            replicas=3,
            selector=client.V1LabelSelector(
                match_labels={'app': 'nginx'}
            ),
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(
                    labels={'app': 'nginx'}
                ),
                spec=client.V1PodSpec(
                    containers=[
                        client.V1Container(
                            name='nginx',
                            image='nginx:1.7.9',
                            ports=[client.V1ContainerPort(container_port=80)]
                        )
                    ]
                )
            )
        )
    )
Example #22
Source File: test_base.py From node with Apache License 2.0 | 5 votes |
def deploy(self, image, name, ns, port, replicas=1, svc_type="NodePort",
           traffic_policy="Local", cluster_ip=None, ipv6=False):
    """
    Creates a deployment and corresponding service with the given parameters.
    """
    # Run a deployment with <replicas> copies of <image>, with the
    # pods labelled with "app": <name>.
    deployment = client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=name),
        spec=client.V1DeploymentSpec(
            replicas=replicas,
            selector={'matchLabels': {'app': name}},
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels={"app": name}),
                spec=client.V1PodSpec(containers=[
                    client.V1Container(name=name,
                                       image=image,
                                       ports=[client.V1ContainerPort(container_port=port)]),
                ]))))
    api_response = client.AppsV1Api().create_namespaced_deployment(
        body=deployment,
        namespace=ns)
    logger.debug("Deployment created. status='%s'" % str(api_response.status))

    # Create a service called <name> whose endpoints are the pods
    # with "app": <name>; i.e. those just created above.
    self.create_service(name, name, ns, port, svc_type, traffic_policy, ipv6=ipv6)
Example #23
Source File: ingress_create.py From python with Apache License 2.0 | 5 votes |
def create_deployment(apps_v1_api):
    container = client.V1Container(
        name="deployment",
        image="gcr.io/google-appengine/fluentd-logger",
        image_pull_policy="Never",
        ports=[client.V1ContainerPort(container_port=5678)],
    )
    # Template
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "deployment"}),
        spec=client.V1PodSpec(containers=[container]))
    # Spec
    spec = client.V1DeploymentSpec(
        replicas=1,
        template=template)
    # Deployment
    deployment = client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name="deployment"),
        spec=spec)
    # Creation of the Deployment in specified namespace
    # (Can replace "default" with a namespace you may have created)
    apps_v1_api.create_namespaced_deployment(
        namespace="default", body=deployment
    )
Example #24
Source File: kubernetes_runner.py From tfx with Apache License 2.0 | 5 votes |
def _get_container_or_error(
    pod: k8s_client.V1Pod, container_name: Text) -> k8s_client.V1Container:
    for container in pod.spec.containers:
        if container.name == container_name:
            return container
    raise ValueError(
        'Unable to find {} container from the pod (found {}).'.format(
            container_name, [c.name for c in pod.spec.containers]))
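A hedged usage sketch of this helper (the pod construction below is illustrative, not from the project):

pod = k8s_client.V1Pod(
    spec=k8s_client.V1PodSpec(containers=[
        k8s_client.V1Container(name='main', image='busybox'),  # illustrative container
    ]))
container = _get_container_or_error(pod, container_name='main')
assert container.image == 'busybox'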
Example #25
Source File: kfservingdeployer.py From pipelines with Apache License 2.0 | 5 votes |
def customEndpointSpec(custom_model_spec, service_account):
    env = (
        [
            client.V1EnvVar(name=i["name"], value=i["value"])
            for i in custom_model_spec["env"]
        ]
        if custom_model_spec.get("env", "")
        else None
    )
    ports = (
        [client.V1ContainerPort(container_port=int(custom_model_spec.get("port", "")))]
        if custom_model_spec.get("port", "")
        else None
    )
    containerSpec = client.V1Container(
        name=custom_model_spec.get("name", "custom-container"),
        image=custom_model_spec["image"],
        env=env,
        ports=ports,
        command=custom_model_spec.get("command", None),
        args=custom_model_spec.get("args", None),
        image_pull_policy=custom_model_spec.get("image_pull_policy", None),
        working_dir=custom_model_spec.get("working_dir", None),
    )
    return V1alpha2EndpointSpec(
        predictor=V1alpha2PredictorSpec(
            custom=V1alpha2CustomSpec(container=containerSpec),
            service_account_name=service_account,
        )
    )
Example #26
Source File: deployment_crud.py From python with Apache License 2.0 | 5 votes |
def create_deployment_object():
    # Configure the Pod template container
    container = client.V1Container(
        name="nginx",
        image="nginx:1.15.4",
        ports=[client.V1ContainerPort(container_port=80)],
        resources=client.V1ResourceRequirements(
            requests={"cpu": "100m", "memory": "200Mi"},
            limits={"cpu": "500m", "memory": "500Mi"}
        )
    )
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "nginx"}),
        spec=client.V1PodSpec(containers=[container]))
    # Create the specification of the deployment
    spec = client.V1DeploymentSpec(
        replicas=3,
        template=template,
        selector={'matchLabels': {'app': 'nginx'}})
    # Instantiate the deployment object
    deployment = client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=DEPLOYMENT_NAME),
        spec=spec)
    return deployment
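Again, only the V1Deployment object is constructed here; the rest of deployment_crud.py presumably submits it through the apps API. A minimal sketch of that call (the namespace is illustrative):

from kubernetes import client, config

config.load_kube_config()
deployment = create_deployment_object()
client.AppsV1Api().create_namespaced_deployment(namespace="default", body=deployment)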
Example #27
Source File: kubernetes_runner_test.py From tfx with Apache License 2.0 | 4 votes |
def _AssumeInsideKfp(
    self,
    namespace='my-namespace',
    pod_name='my-pod-name',
    pod_uid='my-pod-uid',
    pod_service_account_name='my-service-account-name',
    with_pvc=False):
    pod = k8s_client.V1Pod(
        api_version='v1',
        kind='Pod',
        metadata=k8s_client.V1ObjectMeta(
            name=pod_name,
            uid=pod_uid,
        ),
        spec=k8s_client.V1PodSpec(
            containers=[
                k8s_client.V1Container(
                    name='main',
                    volume_mounts=[]),
            ],
            volumes=[]))
    if with_pvc:
        pod.spec.volumes.append(
            k8s_client.V1Volume(
                name='my-volume',
                persistent_volume_claim=k8s_client
                .V1PersistentVolumeClaimVolumeSource(
                    claim_name='my-pvc')))
        pod.spec.containers[0].volume_mounts.append(
            k8s_client.V1VolumeMount(
                name='my-volume',
                mount_path=self._base_dir))
    mock.patch.object(kube_utils, 'is_inside_kfp', return_value=True).start()
    pod.spec.service_account_name = pod_service_account_name
    mock.patch.object(kube_utils, 'get_current_kfp_pod',
                      return_value=pod).start()
    mock.patch.object(kube_utils, 'get_kfp_namespace',
                      return_value=namespace).start()
    if with_pvc:
        (self._mock_core_v1_api.read_namespaced_persistent_volume_claim
         .return_value) = k8s_client.V1PersistentVolumeClaim(
             metadata=k8s_client.V1ObjectMeta(name='my-pvc'),
             spec=k8s_client.V1PersistentVolumeClaimSpec(
                 access_modes=['ReadWriteMany']))
Example #28
Source File: kubernetes_runner.py From tfx with Apache License 2.0 | 4 votes |
def _BuildPodManifest(self) -> k8s_client.V1Pod:
    if isinstance(self._serving_binary, serving_bins.TensorFlowServing):
        env_vars_dict = self._serving_binary.MakeEnvVars(
            model_path=self._model_path)
        env_vars = [k8s_client.V1EnvVar(name=key, value=value)
                    for key, value in env_vars_dict.items()]
    else:
        raise NotImplementedError('Unsupported serving binary {}'.format(
            type(self._serving_binary).__name__))

    service_account_name = (self._config.service_account_name or
                            self._executor_pod.spec.service_account_name)
    active_deadline_seconds = (self._config.active_deadline_seconds or
                               _DEFAULT_ACTIVE_DEADLINE_SEC)
    if active_deadline_seconds < 0:
        raise ValueError('active_deadline_seconds should be > 0. Got {}'
                         .format(active_deadline_seconds))

    result = k8s_client.V1Pod(
        metadata=k8s_client.V1ObjectMeta(
            generate_name=_MODEL_SERVER_POD_NAME_PREFIX,
            labels=self._label_dict,
            # Resources with ownerReferences are automatically deleted once all
            # its owners are deleted.
            owner_references=[
                k8s_client.V1OwnerReference(
                    api_version=self._executor_pod.api_version,
                    kind=self._executor_pod.kind,
                    name=self._executor_pod.metadata.name,
                    uid=self._executor_pod.metadata.uid,
                ),
            ],
        ),
        spec=k8s_client.V1PodSpec(
            containers=[
                k8s_client.V1Container(
                    name=_MODEL_SERVER_CONTAINER_NAME,
                    image=self._serving_binary.image,
                    env=env_vars,
                    volume_mounts=[],
                ),
            ],
            service_account_name=service_account_name,
            # No retry in case model server container failed. Retry will happen
            # at the outermost loop (executor.py).
            restart_policy=_RestartPolicy.NEVER.value,
            # This is a hard deadline for the model server container to ensure
            # the Pod is properly cleaned up even with an unexpected termination
            # of an infra validator. After the deadline, container will be
            # removed but Pod resource won't. This makes the Pod log visible
            # after the termination.
            active_deadline_seconds=active_deadline_seconds,
            volumes=[],
            # TODO(b/152002076): Add TTL controller once it graduates Beta.
            # ttl_seconds_after_finished=,
        )
    )
    self._SetupModelVolumeIfNeeded(result)
    return result
Example #29
Source File: pod.py From conu with GNU General Public License v3.0 | 4 votes |
def create(image_data):
    """
    :param image_data: ImageMetadata
    :return: V1Pod,
        https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Pod.md
    """
    # convert environment variables to Kubernetes objects
    env_variables = []
    for key, value in image_data.env_variables.items():
        env_variables.append(client.V1EnvVar(name=key, value=value))

    # convert exposed ports to Kubernetes objects
    exposed_ports = []
    if image_data.exposed_ports is not None:
        for port in image_data.exposed_ports:
            splits = port.split("/", 1)
            port = int(splits[0])
            protocol = splits[1].upper() if len(splits) > 1 else None
            exposed_ports.append(client.V1ContainerPort(container_port=port, protocol=protocol))

    # generate container name {image-name}-{username}-{random-4-letters}
    # take just name of image and remove tag
    image_name = image_data.name.split("/")[-1].split(":")[0]
    random_string = ''.join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(4))
    container_name = '{image_name}-{user_name}-{random_string}'.format(
        image_name=image_name,
        user_name=getpass.getuser(),
        random_string=random_string)

    container = client.V1Container(
        command=image_data.command,
        env=env_variables,
        image=image_data.name,
        name=container_name,
        ports=exposed_ports)

    pod_metadata = client.V1ObjectMeta(name=container_name + "-pod")
    pod_spec = client.V1PodSpec(containers=[container])
    pod = client.V1Pod(spec=pod_spec, metadata=pod_metadata)

    return pod
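The returned V1Pod still has to be submitted to a cluster. A hedged sketch of doing so with the core API (the namespace is illustrative, and image_data is assumed to be an available conu ImageMetadata instance):

from kubernetes import client, config

config.load_kube_config()
pod = create(image_data)  # image_data: a conu ImageMetadata instance (assumed available)
client.CoreV1Api().create_namespaced_pod(namespace="default", body=pod)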
Example #30
Source File: kubernetes_tools.py From paasta with Apache License 2.0 | 4 votes |
def get_sidecar_containers(
    self,
    system_paasta_config: SystemPaastaConfig,
    service_namespace_config: ServiceNamespaceConfig,
) -> Sequence[V1Container]:
    registrations = " ".join(self.get_registrations())
    # s_m_j currently asserts that services are healthy in smartstack before
    # continuing a bounce. this readiness check lets us achieve the same thing
    readiness_probe: Optional[V1Probe]
    if (
        self.get_enable_nerve_readiness_check()
        and service_namespace_config.is_in_smartstack()
    ):
        readiness_probe = V1Probe(
            _exec=V1ExecAction(
                command=[
                    system_paasta_config.get_nerve_readiness_check_script(),
                    str(self.get_container_port()),
                ]
                + self.get_registrations()
            ),
            initial_delay_seconds=10,
            period_seconds=10,
        )
    else:
        readiness_probe = None

    sidecars = []
    if service_namespace_config.is_in_smartstack():
        sidecars.append(
            V1Container(
                image=system_paasta_config.get_hacheck_sidecar_image_url(),
                lifecycle=V1Lifecycle(
                    pre_stop=V1Handler(
                        _exec=V1ExecAction(
                            command=[
                                "/bin/sh",
                                "-c",
                                f"/usr/bin/hadown {registrations}; sleep 31",
                            ]
                        )
                    )
                ),
                resources=self.get_sidecar_resource_requirements(),
                name=HACHECK_POD_NAME,
                env=self.get_kubernetes_environment(),
                ports=[V1ContainerPort(container_port=6666)],
                readiness_probe=readiness_probe,
            )
        )
    return sidecars