Python kubernetes.client.BatchV1Api() Examples
The following are 11 code examples of kubernetes.client.BatchV1Api(). Each example is taken from an open-source project; the source file and project are noted above the code.
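Before diving into the examples, here is a minimal sketch of the pattern they all share: load a cluster configuration, construct a BatchV1Api client, and call one of its Job methods. It assumes a reachable cluster and a standard kubeconfig (or an in-cluster service account):

from kubernetes import client, config

# Load credentials from the local kubeconfig; inside a pod you would call
# config.load_incluster_config() instead, as several examples below do.
config.load_kube_config()

batch_v1 = client.BatchV1Api()

# List Jobs across all namespaces and report how many pods completed.
for job in batch_v1.list_job_for_all_namespaces().items:
    print(job.metadata.namespace, job.metadata.name, job.status.succeeded)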
Example #1
Source File: k8s.py From armada with Apache License 2.0
def __init__(self, bearer_token=None):
    '''
    Initialize connection to Kubernetes
    '''
    self.bearer_token = bearer_token
    api_client = None

    try:
        config.load_incluster_config()
    except config.config_exception.ConfigException:
        config.load_kube_config()

    if self.bearer_token:
        # Configure API key authorization: Bearer Token
        configuration = client.Configuration()
        configuration.api_key_prefix['authorization'] = 'Bearer'
        configuration.api_key['authorization'] = self.bearer_token
        api_client = client.ApiClient(configuration)

    self.client = client.CoreV1Api(api_client)
    self.batch_api = client.BatchV1Api(api_client)
    self.batch_v1beta1_api = client.BatchV1beta1Api(api_client)
    self.custom_objects = client.CustomObjectsApi(api_client)
    self.api_extensions = client.ApiextensionsV1beta1Api(api_client)
    self.extension_api = client.ExtensionsV1beta1Api(api_client)
    self.apps_v1_api = client.AppsV1Api(api_client)
Example #2
Source File: job_crud.py From python with Apache License 2.0
def main():
    # Configs can be set in Configuration class directly or using helper
    # utility. If no argument provided, the config will be loaded from
    # default location.
    config.load_kube_config()
    batch_v1 = client.BatchV1Api()

    # Create a job object with client-python API. The job we
    # created is same as the `pi-job.yaml` in the /examples folder.
    job = create_job_object()

    create_job(batch_v1, job)

    update_job(batch_v1, job)

    delete_job(batch_v1)
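The helpers referenced above (create_job_object, create_job, update_job, delete_job) are defined elsewhere in job_crud.py and are not reproduced on this page. As a rough sketch of the two most important ones, modeled on the pi Job the comment mentions (the image, command, and job name here are assumptions, not necessarily the upstream values):

def create_job_object():
    # Single container that computes pi, then exits; the Job retries up to
    # four times on failure.
    container = client.V1Container(
        name="pi",
        image="perl",
        command=["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"])
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "pi"}),
        spec=client.V1PodSpec(restart_policy="Never", containers=[container]))
    spec = client.V1JobSpec(template=template, backoff_limit=4)
    return client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(name="pi"),
        spec=spec)


def create_job(api_instance, job):
    # Submit the Job to the default namespace and echo its initial status.
    api_response = api_instance.create_namespaced_job(body=job, namespace="default")
    print("Job created. status='%s'" % str(api_response.status))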
Example #3
Source File: k8s.py From armada with Apache License 2.0
def __init__(self):
    '''
    Initialize connection to Kubernetes
    '''
    try:
        config.load_incluster_config()
    except config.config_exception.ConfigException:
        config.load_kube_config()

    self.client = client.CoreV1Api()
    self.batch_api = client.BatchV1Api()
    self.batch_v1beta1_api = client.BatchV1beta1Api()
    self.extension_api = client.ExtensionsV1beta1Api()
Example #4
Source File: copy_dataset.py From aws-eks-deep-learning-benchmark with Apache License 2.0
def copy_dataset():
    """Install Benchmark Addons."""
    logging.basicConfig(level=logging.INFO,
                        format=('%(asctime)s %(name)-12s %(levelname)-8s %(message)s'),
                        datefmt='%Y-%m-%dT%H:%M:%S',
                        )
    logging.getLogger().setLevel(logging.INFO)
    args = parse_args()

    namespace = args.namespace
    kubeconfig_path = str(os.environ['KUBECONFIG'])
    api_client = deploy_utils.create_k8s_client(kubeconfig_path)
    batchv1_api = k8s_client.BatchV1Api(api_client)

    # need pvc, s3 bucket dataset name
    deployment = create_job_object(args.runner_image, args.region,
                                   args.s3_import_path, args.pvc)
    batchv1_api.create_namespaced_job(namespace, deployment)

    # describe
    logging.info("Wait for data copy finish.")
    wait_for_job(api_client, namespace, "copy-dataset-worker")
    logging.info("Finish copy data from %s to pvc %s", args.s3_import_path, args.pvc)
Example #5
Source File: kubernetes_orchestrator.py From coach with Apache License 2.0
def undeploy(self):
    """
    Undeploy all the components, such as trainer and rollout worker(s),
    Redis pub/sub and data store, when required.
    """
    trainer_params = self.params.run_type_params.get(str(RunType.TRAINER), None)
    api_client = k8sclient.BatchV1Api()
    delete_options = k8sclient.V1DeleteOptions(
        propagation_policy="Foreground"
    )

    if trainer_params:
        try:
            api_client.delete_namespaced_job(trainer_params.orchestration_params['job_name'],
                                             self.params.namespace, delete_options)
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while deleting trainer", e)

    worker_params = self.params.run_type_params.get(str(RunType.ROLLOUT_WORKER), None)
    if worker_params:
        try:
            api_client.delete_namespaced_job(worker_params.orchestration_params['job_name'],
                                             self.params.namespace, delete_options)
        except k8sclient.rest.ApiException as e:
            print("Got exception: %s\n while deleting workers", e)

    self.memory_backend.undeploy()
    self.data_store.undeploy()
Example #6
Source File: copy_dataset.py From aws-eks-deep-learning-benchmark with Apache License 2.0
def wait_for_job(api_client, namespace, name, timeout_minutes=20, replicas=1):
    """Wait for deployment to be ready.

    Args:
      api_client: K8s api client to use.
      namespace: The name space for the deployment.
      name: The name of the deployment.
      timeout_minutes: Timeout interval in minutes.
      replicas: Number of replicas that must be running.

    Returns:
      deploy: The deploy object describing the deployment.

    Raises:
      TimeoutError: If timeout waiting for deployment to be ready.
    """
    # Wait for tiller to be ready
    end_time = datetime.datetime.now() + datetime.timedelta(
        minutes=timeout_minutes)

    ext_client = k8s_client.BatchV1Api(api_client)

    while datetime.datetime.now() < end_time:
        deploy = ext_client.read_namespaced_job(name, namespace)
        # ready_replicas could be None
        if (deploy.status.succeeded and deploy.status.succeeded >= replicas):
            logging.info("Job %s in namespace %s is ready", name, namespace)
            return deploy

        logging.info("Waiting for job %s in namespace %s", name, namespace)
        time.sleep(10)

    logging.error("Timeout waiting for Job %s in namespace %s to be ready",
                  name, namespace)
    util.run(["kubectl", "describe", "job", "-n", namespace, name])
    raise TimeoutError(
        "Timeout waiting for job {0} in namespace {1}".format(name, namespace))
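Example #6 polls read_namespaced_job every ten seconds. The client library also supports server-side watches, so an event-driven variant is possible; the sketch below is an assumption (the function name and timeout are made up), not code from the benchmark repository:

from kubernetes import client, config, watch

def wait_for_job_completion(name, namespace, timeout_seconds=1200):
    # Stream Job updates instead of polling; the watch ends when the Job
    # reports at least one succeeded pod or the timeout expires.
    batch_v1 = client.BatchV1Api()
    w = watch.Watch()
    for event in w.stream(batch_v1.list_namespaced_job,
                          namespace=namespace,
                          field_selector="metadata.name={}".format(name),
                          timeout_seconds=timeout_seconds):
        job = event['object']
        if job.status.succeeded:
            w.stop()
            return job
    raise TimeoutError(
        "Timeout waiting for job {0} in namespace {1}".format(name, namespace))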
Example #7
Source File: kubernetes_helper.py From ml-on-gcp with Apache License 2.0
def create_job(job_body, namespace='default'):
    config.load_kube_config()
    v1 = client.BatchV1Api()
    job = v1.create_namespaced_job(body=job_body, namespace=namespace)
    return job
Example #8
Source File: kubernetes_helper.py From ml-on-gcp with Apache License 2.0
def delete_job(job_name, namespace='default'):
    config.load_kube_config()
    batch_v1 = client.BatchV1Api()
    print('deleting job {} with namespace {}'.format(job_name, namespace))
    delete = batch_v1.delete_namespaced_job(name=job_name,
                                            body=client.V1DeleteOptions(),
                                            namespace=namespace)
    return delete
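One caveat worth noting with Example #8: the batch/v1 Job API historically defaults to orphaning a Job's Pods on delete, so passing an empty V1DeleteOptions() can leave the worker Pods running. A small variant (the function name is mine) that asks the garbage collector to remove them as well:

def delete_job_and_pods(job_name, namespace='default'):
    config.load_kube_config()
    batch_v1 = client.BatchV1Api()
    # 'Background' or 'Foreground' propagation deletes the Pods owned by the
    # Job; compare Example #5, which uses 'Foreground' for the same reason.
    body = client.V1DeleteOptions(propagation_policy='Background')
    return batch_v1.delete_namespaced_job(name=job_name, namespace=namespace, body=body)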
Example #9
Source File: client.py From kube-shell with Apache License 2.0
def __init__(self):
    self.logger = logging.getLogger(__name__)
    try:
        config_file = os.path.expanduser(kubeconfig_filepath)
        config.load_kube_config(config_file=config_file)
    except:
        self.logger.warning("unable to load kube-config")

    self.v1 = client.CoreV1Api()
    self.v1Beta1 = client.AppsV1beta1Api()
    self.extensionsV1Beta1 = client.ExtensionsV1beta1Api()
    self.autoscalingV1Api = client.AutoscalingV1Api()
    self.rbacApi = client.RbacAuthorizationV1beta1Api()
    self.batchV1Api = client.BatchV1Api()
    self.batchV2Api = client.BatchV2alpha1Api()
Example #10
Source File: manager.py From polyaxon with Apache License 2.0
def k8s_batch_api(self):
    if not self._k8s_batch_api:
        self._k8s_batch_api = client.BatchV1Api(self.api_client)
    return self._k8s_batch_api
Example #11
Source File: kubernetes.py From training_results_v0.6 with Apache License 2.0
def submit(args):
    def kubernetes_submit(nworker, nserver, pass_envs):
        sv_image = args.kube_server_image
        wk_image = args.kube_worker_image

        if args.jobname is not None:
            r_uri = "mx-" + args.jobname + "-sched"
        else:
            r_uri = "mx-sched"
        r_port = 9091

        sd_envs = create_env(r_uri, r_port, nserver, nworker)
        mn_jobs = []
        mn_sh_job = create_sched_job_manifest(str(nworker), str(nserver),
                                              sd_envs, sv_image, args.command)
        mn_sh_svc = create_sched_svc_manifest(r_uri, r_port)

        for i in range(nserver):
            envs = create_env(r_uri, r_port, nserver, nworker)
            mn_sv = create_ps_manifest(str(i), str(nserver), args.jobname, envs,
                                       sv_image, args.command, args.kube_server_template)
            mn_jobs.append(mn_sv)

        for i in range(nworker):
            envs = create_env(r_uri, r_port, nserver, nworker)
            mn_wk = create_wk_manifest(str(i), str(nworker), str(nserver), args.jobname,
                                       envs, wk_image, args.command, args.kube_worker_template)
            mn_jobs.append(mn_wk)

        config.load_kube_config()
        k8s_coreapi = client.CoreV1Api()
        k8s_batch = client.BatchV1Api()

        resp = k8s_batch.create_namespaced_job(namespace=args.kube_namespace, body=mn_sh_job)
        print(resp.kind + " " + resp.metadata.name + " is created.")
        resp = k8s_coreapi.create_namespaced_service(namespace="default", body=mn_sh_svc)
        print(resp.kind + " " + resp.metadata.name + " is created.")
        for m in mn_jobs:
            resp = k8s_batch.create_namespaced_job(body=m, namespace="default")
            print(resp.kind + " " + resp.metadata.name + " is created.")
        return kubernetes_submit

    tracker.submit(args.num_workers, args.num_servers,
                   fun_submit=kubernetes_submit,
                   pscmd="echo \"To check each log, try 'kubectl logs job/{{role}}-{{jobname}}-{{workerID}}'\"")