Python kubernetes.client.V1Volume() Examples
The following are 12 code examples of kubernetes.client.V1Volume(). The project, source file, and license for each example are noted above it. You may also want to check out the other available functions and classes of the kubernetes.client module.
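Before the examples, a minimal sketch of what a V1Volume is: it declares a named volume source (emptyDir, hostPath, secret, persistent volume claim, etc.) at the Pod level, and containers attach it through a V1VolumeMount with a matching name. The names and paths below are illustrative:

from kubernetes import client

# An emptyDir scratch volume and a PVC-backed volume, declared on the Pod...
scratch = client.V1Volume(name="scratch", empty_dir=client.V1EmptyDirVolumeSource())
data = client.V1Volume(
    name="data",
    persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
        claim_name="my-claim"))

# ...and mounted into a container by matching names.
container = client.V1Container(
    name="main",
    image="busybox",
    volume_mounts=[
        client.V1VolumeMount(name="scratch", mount_path="/tmp/scratch"),
        client.V1VolumeMount(name="data", mount_path="/data"),
    ])
pod_spec = client.V1PodSpec(containers=[container], volumes=[scratch, data])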
Example #1
Source File: volume.py From pipelines with Apache License 2.0
def volume_pipeline():
    op1 = dsl.ContainerOp(
        name='download',
        image='google/cloud-sdk',
        command=['sh', '-c'],
        arguments=['ls | tee /tmp/results.txt'],
        file_outputs={'downloaded': '/tmp/results.txt'}) \
        .add_volume(k8s_client.V1Volume(
            name='gcp-credentials',
            secret=k8s_client.V1SecretVolumeSource(secret_name='user-gcp-sa'))) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path='/secret/gcp-credentials',
            name='gcp-credentials')) \
        .add_env_variable(k8s_client.V1EnvVar(
            name='GOOGLE_APPLICATION_CREDENTIALS',
            value='/secret/gcp-credentials/user-gcp-sa.json')) \
        .add_env_variable(k8s_client.V1EnvVar(name='Foo', value='bar'))
    op2 = dsl.ContainerOp(
        name='echo',
        image='library/bash',
        command=['sh', '-c'],
        arguments=['echo %s' % op1.output])
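In the original file this function is presumably wrapped with the @dsl.pipeline decorator; a minimal sketch of compiling it with the KFP v1 SDK (the output file name is illustrative):

import kfp.compiler as compiler

# Compile the pipeline function into an Argo workflow package.
compiler.Compiler().compile(volume_pipeline, 'volume_pipeline.tar.gz')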
Example #2
Source File: copy_dataset.py From aws-eks-deep-learning-benchmark with Apache License 2.0
def create_job_object(runner_image, region, s3_path, pvc_name):
    target_folder = get_target_folder(s3_path)
    # Configure the Pod template container
    container = k8s_client.V1Container(
        name="copy-dataset-worker",
        image=runner_image,
        command=["aws"],
        args=["s3", "sync", s3_path, "/mnt/" + target_folder],
        volume_mounts=[k8s_client.V1VolumeMount(name="data-storage", mount_path='/mnt')],
        env=[
            k8s_client.V1EnvVar(name="AWS_REGION", value=region),
            k8s_client.V1EnvVar(
                name="AWS_ACCESS_KEY_ID",
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        key="AWS_ACCESS_KEY_ID", name="aws-secret"))),
            k8s_client.V1EnvVar(
                name="AWS_SECRET_ACCESS_KEY",
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        key="AWS_SECRET_ACCESS_KEY", name="aws-secret"))),
        ],
    )
    volume = k8s_client.V1Volume(
        name='data-storage',
        persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name=pvc_name))
    # Create and configure the Pod template spec
    template = k8s_client.V1PodTemplateSpec(
        # metadata=k8s_client.V1ObjectMeta(labels={"app": "copy-dataset-worker"}),
        spec=k8s_client.V1PodSpec(
            containers=[container], volumes=[volume], restart_policy="OnFailure"))
    # Create the specification of the job
    spec = k8s_client.V1JobSpec(
        # selector=k8s_client.V1LabelSelector(match_labels={"app": "copy-dataset-worker"}),
        template=template)
    # Instantiate the job object
    job = k8s_client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=k8s_client.V1ObjectMeta(name=container.name),
        spec=spec)
    return job
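The returned V1Job can then be submitted through the batch API. A minimal sketch, assuming a kubeconfig-based client; the runner image, region, S3 path, and PVC name are illustrative:

from kubernetes import client as k8s_client, config as k8s_config

k8s_config.load_kube_config()  # or load_incluster_config() inside a Pod
job = create_job_object(
    runner_image='amazon/aws-cli',     # hypothetical runner image
    region='us-west-2',
    s3_path='s3://my-bucket/dataset',  # hypothetical bucket path
    pvc_name='dataset-pvc')
k8s_client.BatchV1Api().create_namespaced_job(namespace='default', body=job)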
Example #3
Source File: onprem.py From pipelines with Apache License 2.0
def mount_pvc(pvc_name='pipeline-claim',
              volume_name='pipeline',
              volume_mount_path='/mnt/pipeline'):
    """Modifier function to apply to a ContainerOp to simplify volume and volume
    mount addition, and to enable better reuse of volumes and volume claims
    across container ops.

    Usage:
        train = train_op(...)
        train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline'))
    """
    def _mount_pvc(task):
        from kubernetes import client as k8s_client
        # there can be other ops in a pipeline (e.g. ResourceOp, VolumeOp)
        # refer to #3906
        if not hasattr(task, "add_volume") or not hasattr(task, "add_volume_mount"):
            return task
        local_pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name)
        return (
            task
            .add_volume(
                k8s_client.V1Volume(name=volume_name,
                                    persistent_volume_claim=local_pvc))
            .add_volume_mount(
                k8s_client.V1VolumeMount(mount_path=volume_mount_path,
                                         name=volume_name))
        )
    return _mount_pvc
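Because _mount_pvc quietly skips tasks without add_volume/add_volume_mount, the same modifier can also be registered as an op transformer so every op in a pipeline gets the mount. A hedged sketch, assuming the KFP v1 SDK; the claim name and paths are illustrative:

from kfp import dsl

@dsl.pipeline(name='shared-pvc-demo')
def shared_pvc_demo():
    # Apply mount_pvc to every op added to this pipeline.
    dsl.get_pipeline_conf().add_op_transformer(
        mount_pvc('shared-claim', 'shared', '/mnt/shared'))
    echo = dsl.ContainerOp(name='echo', image='library/bash',
                           command=['sh', '-c'], arguments=['ls /mnt/shared'])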
Example #4
Source File: kubernetes_tools.py From paasta with Apache License 2.0
def get_pod_volumes(
    self,
    docker_volumes: Sequence[DockerVolume],
    aws_ebs_volumes: Sequence[AwsEbsVolume],
) -> Sequence[V1Volume]:
    pod_volumes = []
    unique_docker_volumes = {
        self.get_docker_volume_name(docker_volume): docker_volume
        for docker_volume in docker_volumes
    }
    for name, docker_volume in unique_docker_volumes.items():
        pod_volumes.append(
            V1Volume(
                host_path=V1HostPathVolumeSource(path=docker_volume["hostPath"]),
                name=name,
            )
        )
    unique_aws_ebs_volumes = {
        self.get_aws_ebs_volume_name(aws_ebs_volume): aws_ebs_volume
        for aws_ebs_volume in aws_ebs_volumes
    }
    for name, aws_ebs_volume in unique_aws_ebs_volumes.items():
        pod_volumes.append(
            V1Volume(
                aws_elastic_block_store=V1AWSElasticBlockStoreVolumeSource(
                    volume_id=aws_ebs_volume["volume_id"],
                    fs_type=aws_ebs_volume.get("fs_type"),
                    partition=aws_ebs_volume.get("partition"),
                    # k8s wants RW volume even if it's later mounted RO
                    read_only=False,
                ),
                name=name,
            )
        )
    return pod_volumes
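Two details worth noting: the dict comprehensions deduplicate volumes by name, since volume names must be unique within a Pod spec; and the read_only=False flag applies to the volume source itself, while per-container read-only enforcement belongs on the mount. A minimal sketch of that distinction (the name and path are illustrative):

from kubernetes.client import V1VolumeMount

# The EBS volume stays read-write at the Pod level (as above), but this
# particular container sees it read-only via its mount.
mount = V1VolumeMount(name='ebs-data', mount_path='/data', read_only=True)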
Example #5
Source File: deploy.py From margipose with Apache License 2.0
def _host_volume(name, path, type):
    return client.V1Volume(
        name=name,
        host_path=client.V1HostPathVolumeSource(path=path, type=type)
    )
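A hedged usage sketch; the type strings ('Directory', 'File', 'DirectoryOrCreate', ...) come from the Kubernetes hostPath API, and the name and path here are illustrative:

models = _host_volume('models', '/srv/models', 'Directory')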
Example #6
Source File: gcp.py From pipelines with Apache License 2.0
def use_gcp_secret(secret_name='user-gcp-sa',
                   secret_file_path_in_volume=None,
                   volume_name=None,
                   secret_volume_mount_path='/secret/gcp-credentials'):
    """An operator that configures the container to use a GCP service account
    via a service account key stored in a Kubernetes secret.

    For cluster setup and alternatives to using a service account key, check
    https://www.kubeflow.org/docs/gke/authentication-pipelines/.
    """
    # permitted values for secret_name = ['admin-gcp-sa', 'user-gcp-sa']
    if secret_file_path_in_volume is None:
        secret_file_path_in_volume = '/' + secret_name + '.json'

    if volume_name is None:
        volume_name = 'gcp-credentials-' + secret_name
    else:
        import warnings
        warnings.warn(
            'The volume_name parameter is deprecated and will be removed in the '
            'next release. The volume names are now generated automatically.',
            DeprecationWarning)

    def _use_gcp_secret(task):
        from kubernetes import client as k8s_client
        task = task.add_volume(
            k8s_client.V1Volume(
                name=volume_name,
                secret=k8s_client.V1SecretVolumeSource(
                    secret_name=secret_name,
                )
            )
        )
        task.container \
            .add_volume_mount(
                k8s_client.V1VolumeMount(
                    name=volume_name,
                    mount_path=secret_volume_mount_path,
                )
            ) \
            .add_env_variable(
                k8s_client.V1EnvVar(
                    name='GOOGLE_APPLICATION_CREDENTIALS',
                    value=secret_volume_mount_path + secret_file_path_in_volume,
                )
            ) \
            .add_env_variable(
                k8s_client.V1EnvVar(
                    name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',
                    value=secret_volume_mount_path + secret_file_path_in_volume,
                )
            )
        # Set GCloud credentials by using the env var override.
        # TODO: Is there a better way for GCloud to pick up the credential?
        return task

    return _use_gcp_secret
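A hedged usage sketch, assuming the KFP v1 SDK (the op itself is illustrative):

from kfp import dsl

def gcp_secret_demo():
    train = dsl.ContainerOp(
        name='train',
        image='google/cloud-sdk',
        command=['sh', '-c'],
        arguments=['gcloud auth list'])
    # Mount the 'user-gcp-sa' secret and point gcloud at its key file.
    train.apply(use_gcp_secret('user-gcp-sa'))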
Example #7
Source File: resourceop_basic.py From pipelines with Apache License 2.0
def resourceop_basic(username, password):
    secret_resource = k8s_client.V1Secret(
        api_version="v1",
        kind="Secret",
        metadata=k8s_client.V1ObjectMeta(generate_name="my-secret-"),
        type="Opaque",
        data={"username": username, "password": password}
    )
    rop = dsl.ResourceOp(
        name="create-my-secret",
        k8s_resource=secret_resource,
        attribute_outputs={"name": "{.metadata.name}"}
    )
    secret = k8s_client.V1Volume(
        name="my-secret",
        secret=k8s_client.V1SecretVolumeSource(secret_name=rop.output)
    )
    cop = dsl.ContainerOp(
        name="cop",
        image="library/bash:4.4.23",
        command=["sh", "-c"],
        arguments=["ls /etc/secret-volume"],
        pvolumes={"/etc/secret-volume": secret}
    )
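Note that passing the V1Volume through pvolumes both adds the volume to the pod and mounts it at the given path; and because secret_name references rop.output, KFP also orders cop after the ResourceOp that creates the secret.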
Example #8
Source File: watson_train_serve_pipeline.py From pipelines with Apache License 2.0
def use_ai_pipeline_params(secret_name,
                           secret_volume_mount_path='/app/secrets',
                           image_pull_policy='IfNotPresent'):
    def _use_ai_pipeline_params(task):
        from kubernetes import client as k8s_client
        task = task.add_volume(k8s_client.V1Volume(
            name=secret_name,  # secret_name as volume name
            secret=k8s_client.V1SecretVolumeSource(secret_name=secret_name)))
        task.container.add_volume_mount(k8s_client.V1VolumeMount(
            mount_path=secret_volume_mount_path,
            name=secret_name))
        task.container.set_image_pull_policy(image_pull_policy)
        return task
    return _use_ai_pipeline_params


# create pipelines
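As with the other modifiers above, this is applied to a task with .apply(); a minimal sketch (the secret name is illustrative):

task.apply(use_ai_pipeline_params('ai-pipeline-params'))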
Example #9
Source File: kubernetes_runner.py From tfx with Apache License 2.0
def _SetupModelVolumeIfNeeded(self, pod_manifest: k8s_client.V1Pod):
    mount = self._FindVolumeMountForPath(self._model_path)
    if not mount:
        return
    [volume] = [v for v in self._executor_pod.spec.volumes
                if v.name == mount.name]
    if volume.persistent_volume_claim is None:
        raise NotImplementedError('Only PersistentVolumeClaim is allowed.')
    claim_name = volume.persistent_volume_claim.claim_name
    pvc = self._k8s_core_api.read_namespaced_persistent_volume_claim(
        name=claim_name, namespace=self._namespace)

    # PersistentVolumeClaim for pipeline root SHOULD have ReadWriteMany access
    # mode. Although it is allowed to mount ReadWriteOnce volume if Pods share
    # the Node, there's no guarantee the model server Pod will be launched in
    # the same Node.
    if all(access_mode != _AccessMode.READ_WRITE_MANY.value
           for access_mode in pvc.spec.access_modes):
        raise RuntimeError('Access mode should be ReadWriteMany.')

    logging.info('PersistentVolumeClaim %s will be mounted to %s.',
                 pvc, mount.mount_path)

    pod_manifest.spec.volumes.append(
        k8s_client.V1Volume(
            name=_MODEL_SERVER_MODEL_VOLUME_NAME,
            persistent_volume_claim=k8s_client
            .V1PersistentVolumeClaimVolumeSource(
                claim_name=claim_name,
                read_only=True)))
    container_manifest = _get_container_or_error(
        pod_manifest, container_name=_MODEL_SERVER_CONTAINER_NAME)
    container_manifest.volume_mounts.append(
        k8s_client.V1VolumeMount(
            name=_MODEL_SERVER_MODEL_VOLUME_NAME,
            mount_path=mount.mount_path,
            read_only=True,
        )
    )
Example #10
Source File: kubernetes.py From pipelines with Apache License 2.0
def use_secret(secret_name: str,
               secret_volume_mount_path: str,
               env_variable: str = None,
               secret_file_path_in_volume: str = None):
    """An operator that configures the container to use a secret.

    This assumes that the secret is created and available in the k8s cluster.

    Keyword Arguments:
        secret_name {String} -- [Required] The k8s secret name.
        secret_volume_mount_path {String} -- [Required] The path to the secret that is mounted.
        env_variable {String} -- Env variable pointing to the mounted secret file. Requires both
            env_variable and secret_file_path_in_volume to be defined. The value is the path to
            the secret.
        secret_file_path_in_volume {String} -- The path to the secret in the volume. This will be
            the value of env_variable. Both env_variable and secret_file_path_in_volume need to
            be set if any env variable should be created.

    Raises:
        ValueError: If the necessary variables (secret_name, secret_volume_mount_path) are not
            supplied, or if only one of env_variable and secret_file_path_in_volume is supplied.

    Returns:
        [ContainerOperator] -- Returns the container operator after it has been modified.
    """
    secret_name = str(secret_name)
    if '{{' in secret_name:
        volume_name = ''.join(
            random.choices(string.ascii_lowercase + string.digits, k=10)) + "_volume"
    else:
        volume_name = secret_name

    for param, param_name in zip(
            [secret_name, secret_volume_mount_path],
            ["secret_name", "secret_volume_mount_path"]):
        if param == "":
            raise ValueError("The '{}' must not be empty".format(param_name))

    if bool(env_variable) != bool(secret_file_path_in_volume):
        raise ValueError(
            "Both {} and {} need to be supplied together or not at all".format(
                env_variable, secret_file_path_in_volume))

    def _use_secret(task):
        import os
        from kubernetes import client as k8s_client
        task = task.add_volume(
            k8s_client.V1Volume(
                name=volume_name,
                secret=k8s_client.V1SecretVolumeSource(
                    secret_name=secret_name
                )
            )
        ).add_volume_mount(
            k8s_client.V1VolumeMount(
                name=volume_name,
                mount_path=secret_volume_mount_path
            )
        )
        if env_variable:
            task.container.add_env_variable(
                k8s_client.V1EnvVar(
                    name=env_variable,
                    value=os.path.join(secret_volume_mount_path,
                                       secret_file_path_in_volume),
                )
            )
        return task

    return _use_secret
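A hedged usage sketch (the secret name, mount path, and file name are illustrative):

task.apply(use_secret(
    secret_name='db-credentials',
    secret_volume_mount_path='/app/secrets',
    env_variable='DB_PASSWORD_FILE',
    secret_file_path_in_volume='password.txt'))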
Example #11
Source File: kubernetes_runner_test.py From tfx with Apache License 2.0
def _AssumeInsideKfp(self,
                     namespace='my-namespace',
                     pod_name='my-pod-name',
                     pod_uid='my-pod-uid',
                     pod_service_account_name='my-service-account-name',
                     with_pvc=False):
    pod = k8s_client.V1Pod(
        api_version='v1',
        kind='Pod',
        metadata=k8s_client.V1ObjectMeta(
            name=pod_name,
            uid=pod_uid,
        ),
        spec=k8s_client.V1PodSpec(
            containers=[
                k8s_client.V1Container(
                    name='main',
                    volume_mounts=[]),
            ],
            volumes=[]))
    if with_pvc:
        pod.spec.volumes.append(
            k8s_client.V1Volume(
                name='my-volume',
                persistent_volume_claim=k8s_client
                .V1PersistentVolumeClaimVolumeSource(
                    claim_name='my-pvc')))
        pod.spec.containers[0].volume_mounts.append(
            k8s_client.V1VolumeMount(
                name='my-volume',
                mount_path=self._base_dir))
    mock.patch.object(kube_utils, 'is_inside_kfp',
                      return_value=True).start()
    pod.spec.service_account_name = pod_service_account_name
    mock.patch.object(kube_utils, 'get_current_kfp_pod',
                      return_value=pod).start()
    mock.patch.object(kube_utils, 'get_kfp_namespace',
                      return_value=namespace).start()
    if with_pvc:
        (self._mock_core_v1_api.read_namespaced_persistent_volume_claim
         .return_value) = k8s_client.V1PersistentVolumeClaim(
             metadata=k8s_client.V1ObjectMeta(name='my-pvc'),
             spec=k8s_client.V1PersistentVolumeClaimSpec(
                 access_modes=['ReadWriteMany']))
Example #12
Source File: kubeflow-pipeline.py From OLive with MIT License
def onnx_pipeline(
        model,
        output_onnx_path,
        model_type,
        output_perf_result_path,
        execution_providers="",
        model_inputs_names="",
        model_outputs_names="",
        model_input_shapes="",
        model_initial_types="",
        caffe_model_prototxt="",
        target_opset=7):

    # Create two components, "Convert To ONNX" and "ONNX Runtime Perf". Edit
    # the V1PersistentVolumeClaimVolumeSource name to match the persistent
    # volume claim you created if needed. By default the names match
    # ../azure-files-sc.yaml and ../azure-files-pvc.yaml.
    convert_op = onnxConverterOp(
        'Convert To ONNX',
        '%s' % model,
        '%s' % output_onnx_path,
        '%s' % model_type,
        '%s' % model_inputs_names,
        '%s' % model_outputs_names,
        '%s' % model_input_shapes,
        '%s' % model_initial_types,
        '%s' % caffe_model_prototxt,
        '%s' % target_opset).add_volume(
            k8s_client.V1Volume(
                name='pipeline-nfs',
                persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
                    claim_name='azurefile'))).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path='/mnt', name='pipeline-nfs'))

    perf_op = perfTestOp(
        'ONNX Runtime Perf',
        convert_op.output,
        '%s' % output_perf_result_path,
        '%s' % execution_providers,
    ).add_volume(
        k8s_client.V1Volume(
            name='pipeline-nfs',
            persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
                claim_name='azurefile'))).add_volume_mount(
        k8s_client.V1VolumeMount(mount_path='/mnt', name='pipeline-nfs')).set_gpu_limit(1)

    dsl.get_pipeline_conf().set_image_pull_secrets(
        [k8s_client.V1ObjectReference(name="regcred")])