Python kubernetes.config.new_client_from_config() Examples
The following are 7 code examples of kubernetes.config.new_client_from_config().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module kubernetes.config, or try the search function.
![](https://www.programcreek.com/common/static/images/search.png)
Example #1
Source File: pipeline.py From freight with Apache License 2.0 | 6 votes |
def load_kube_credentials(config: Dict[str, Any]) -> ApiClient:
    """Build a Kubernetes ``ApiClient`` from a pipeline config mapping.

    Resolution order:
      1. An explicit kubeconfig ``context`` entry in *config*.
      2. A ``credentials`` entry (currently only ``kind: gcloud`` is supported),
         delegated to ``load_kube_credentials_gcloud``.

    Raises:
        AuthenticationError: if neither a context nor credentials are present,
            or the credentials ``kind`` is not recognized.
    """
    # If a context is specified, attempt to use this first.
    try:
        return new_client_from_config(context=config["context"])
    except KeyError:
        pass

    try:
        credentials = config["credentials"]
    except KeyError:
        # BUG FIX: the original message said "context", but at this point it is
        # the "credentials" key that is missing (the context branch was already
        # tried above).
        raise AuthenticationError("Missing kubernetes credentials.")

    if credentials["kind"] not in ("gcloud",):
        raise AuthenticationError(f"Unknown kubernetes kind: {credentials['kind']}")

    return load_kube_credentials_gcloud(credentials)
Example #2
Source File: __init__.py From insights-core with Apache License 2.0 | 6 votes |
def __init__(self, ctx=None, cfg=None):
    """Create a ``DynamicClient``, preferring an explicit kubeconfig path.

    Falls back to in-cluster configuration when neither *cfg* nor the
    ``KUBECONFIG`` environment variable is set.
    """
    kubeconfig_path = cfg or os.environ.get("KUBECONFIG")
    if kubeconfig_path:
        api_client = config.new_client_from_config(kubeconfig_path)
    else:
        # Populates a singleton client configuration behind the scenes.
        config.load_incluster_config()
        # Configuration() hands back a copy of the singleton populated above.
        cluster_cfg = Configuration()
        # Required due to https://github.com/openshift/origin/issues/22125
        cluster_cfg.verify_ssl = False
        api_client = ApiClient(configuration=cluster_cfg)
    # Mirrors what config.new_client_from_config does internally.
    self.k8s = DynamicClient(api_client)
Example #3
Source File: openshift_ops.py From ocs-ci with MIT License | 6 votes |
def __init__(self):
    """Look up the dynamic-client resource handles this wrapper uses."""
    api_client = config.new_client_from_config()
    resources = DynamicClient(api_client).resources
    self.v1_service_list = resources.get(api_version='v1', kind='ServiceList')
    self.v1_projects = resources.get(
        api_version='project.openshift.io/v1', kind='Project'
    )
    self.pods = resources.get(api_version=default.API_VERSION, kind='Pod')
    self.deployments = resources.get(
        api_version=default.API_VERSION, kind='Deployment'
    )
    self.services = resources.get(api_version=default.API_VERSION, kind='Service')
Example #4
Source File: test_kubernetes.py From mars with Apache License 2.0 | 5 votes |
def testCreateTimeout(self):
    """A one-second deadline must make ``new_cluster`` raise TimeoutError."""
    client = k8s_config.new_client_from_config()
    created_cluster = None
    self._docker_image = 'pseudo_image'
    try:
        src_volume = HostPathVolumeConfig('mars-src-path', '/mnt/mars', MARS_ROOT)
        with self.assertRaises(TimeoutError):
            created_cluster = new_cluster(
                client,
                image=self._docker_image,
                extra_volumes=[src_volume],
                timeout=1,
            )
    finally:
        # new_cluster may have assigned before timing out; always tear down.
        if created_cluster:
            created_cluster.stop(wait=True)
Example #5
Source File: multiple_clusters.py From python with Apache License 2.0 | 5 votes |
def main():
    """Interactively pick two kubeconfig contexts and list the pods on each."""
    contexts, active_context = config.list_kube_config_contexts()
    if not contexts:
        print("Cannot find any context in kube-config file.")
        return
    names = [entry['name'] for entry in contexts]
    default_idx = names.index(active_context['name'])
    first_ctx, chosen_idx = pick(
        names, title="Pick the first context", default_index=default_idx)
    second_ctx, _ = pick(
        names, title="Pick the second context", default_index=chosen_idx)
    # One ApiClient per context, so the two clusters can be queried side by side.
    api_first = client.CoreV1Api(
        api_client=config.new_client_from_config(context=first_ctx))
    api_second = client.CoreV1Api(
        api_client=config.new_client_from_config(context=second_ctx))
    print("\nList of pods on %s:" % first_ctx)
    for pod in api_first.list_pod_for_all_namespaces().items:
        print("%s\t%s\t%s" % (pod.status.pod_ip, pod.metadata.namespace,
                              pod.metadata.name))
    print("\n\nList of pods on %s:" % second_ctx)
    for pod in api_second.list_pod_for_all_namespaces().items:
        print("%s\t%s\t%s" % (pod.status.pod_ip, pod.metadata.namespace,
                              pod.metadata.name))
Example #6
Source File: pipeline.py From freight with Apache License 2.0 | 5 votes |
def load_kube_credentials_gcloud(credentials: Dict[str, str]) -> ApiClient:
    """Return an ``ApiClient`` for the GKE cluster described by *credentials*.

    First tries the kubeconfig context that gcloud auto-generates for this
    cluster, so we avoid shelling out to ``gcloud`` when we've already authed
    before; only on failure does it run
    ``gcloud container clusters get-credentials`` and retry.
    """
    from subprocess import check_call, DEVNULL

    cluster_name = credentials["cluster"]
    project_id = credentials["project"]
    zone_name = credentials["zone"]
    # gcloud's auto-generated context naming scheme.
    kube_context = f"gke_{project_id}_{zone_name}_{cluster_name}"

    try:
        return new_client_from_config(context=kube_context)
    except (ConfigException, FileNotFoundError):
        pass

    check_call(
        ["gcloud", "container", "clusters", "get-credentials",
         cluster_name, "--zone", zone_name, "--project", project_id],
        stdout=DEVNULL,
        stderr=DEVNULL,
    )
    return new_client_from_config(context=kube_context)
Example #7
Source File: test_kubernetes.py From mars with Apache License 2.0 | 4 votes |
def testRunInKubernetes(self):
    """End-to-end test: deploy a Mars cluster on Kubernetes, run a tensor
    computation through it, verify the result, then shut everything down.

    NOTE(review): requires a working kubeconfig, docker images, and the
    ``kubectl`` binary on PATH — this is an integration test, not a unit test.
    """
    self._build_docker_images()
    # Scratch directory mounted as the worker spill path; removed in finally.
    temp_spill_dir = tempfile.mkdtemp(prefix='test-mars-k8s-')
    api_client = k8s_config.new_client_from_config()
    kube_api = k8s_client.CoreV1Api(api_client)
    cluster_client = None
    try:
        extra_vol_config = HostPathVolumeConfig('mars-src-path', '/mnt/mars', MARS_ROOT)
        cluster_client = new_cluster(api_client, image=self._docker_image,
                                     worker_spill_paths=[temp_spill_dir],
                                     extra_volumes=[extra_vol_config],
                                     pre_stop_command=['rm', '/tmp/stopping.tmp'],
                                     timeout=600, log_when_fail=True)
        self.assertIsNotNone(cluster_client.endpoint)
        pod_items = kube_api.list_namespaced_pod(cluster_client.namespace).to_dict()
        # Tail every pod's logs in the background so failures are diagnosable.
        log_processes = []
        for item in pod_items['items']:
            log_processes.append(subprocess.Popen(['kubectl', 'logs', '-f', '-n',
                                                   cluster_client.namespace,
                                                   item['metadata']['name']]))
        # Run a deterministic computation and check it against the NumPy result.
        a = mt.ones((100, 100), chunk_size=30) * 2 * 1 + 1
        b = mt.ones((100, 100), chunk_size=30) * 2 * 1 + 1
        c = (a * b * 2 + 1).sum()
        r = cluster_client.session.run(c, timeout=600)
        expected = (np.ones(a.shape) * 2 * 1 + 1) ** 2 * 2 + 1
        assert_array_equal(r, expected.sum())
        # turn off service processes with grace to get coverage data
        procs = []
        for item in pod_items['items']:
            p = subprocess.Popen(['kubectl', 'exec', '-n', cluster_client.namespace,
                                  item['metadata']['name'], '/srv/graceful_stop.sh'])
            procs.append(p)
        # Wait for every graceful stop to finish before killing the log tails.
        for p in procs:
            p.wait()
        [p.terminate() for p in log_processes]
    finally:
        shutil.rmtree(temp_spill_dir)
        if cluster_client:
            # Best-effort teardown; a hung stop must not fail the test here.
            try:
                cluster_client.stop(wait=True, timeout=20)
            except TimeoutError:
                pass
        self._remove_docker_image(False)