Python pyVmomi.vim.ClusterComputeResource() Examples
The following are 28 code examples of pyVmomi.vim.ClusterComputeResource(), drawn from open-source projects. Each example notes its source file, originating project, and license.
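Before the examples, it may help to see the pattern most of them build on: retrieving vim.ClusterComputeResource objects through a container view rooted at the vCenter root folder. The sketch below is a minimal illustration under stated assumptions; the hostname and credentials are placeholders, and the unverified SSL context is for lab use only.

import atexit
import ssl

from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

# Placeholder connection details -- replace with real values.
# The unverified context skips certificate checks; fine for a lab, not production.
context = ssl._create_unverified_context()
si = SmartConnect(host='vcenter.example.com', user='user',
                  pwd='secret', sslContext=context)
atexit.register(Disconnect, si)

content = si.RetrieveContent()
# Container view rooted at rootFolder, recursively matching clusters only.
view = content.viewManager.CreateContainerView(
    content.rootFolder, [vim.ClusterComputeResource], True)
clusters = list(view.view)
view.Destroy()

for cluster in clusters:
    print(cluster.name, cluster._GetMoId())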
Example #1
Source File: cluster.py From vsphere-automation-sdk-python with MIT License
def cleanup_cluster(context):
    """Delete cluster after vcenter sample run"""
    cluster1_name = context.testbed.config['CLUSTER1_NAME']
    names = set([cluster1_name])

    cluster_summaries = context.client.vcenter.Cluster.list(
        Cluster.FilterSpec(names=names))
    print("Found '{}' Clusters matching names {}".
          format(len(cluster_summaries),
                 ", ".join(["'{}'".format(n) for n in names])))
    if len(cluster_summaries) < 1:
        return

    # Delete the cluster using the managed object
    cluster = cluster_summaries[0].cluster
    cluster_mo = vim.ClusterComputeResource(cluster, context.soap_stub)

    print("Deleting Cluster '{}' ({})".format(cluster, cluster1_name))
    task = cluster_mo.Destroy()
    pyVim.task.WaitForTask(task)
Example #2
Source File: host.py From vsphere-automation-sdk-python with MIT License
def move_host_into_cluster_vim(context, host_name, cluster_name):
    """Use vim api to move host to another cluster"""
    TIMEOUT = 30  # sec
    host = context.testbed.entities['HOST_IDS'][host_name]
    host_mo = vim.HostSystem(host, context.soap_stub)

    # Move the host into the cluster
    if not host_mo.runtime.inMaintenanceMode:
        task = host_mo.EnterMaintenanceMode(TIMEOUT)
        pyVim.task.WaitForTask(task)
    print("Host '{}' ({}) in maintenance mode".format(host, host_name))

    cluster = context.testbed.entities['CLUSTER_IDS'][cluster_name]
    cluster_mo = vim.ClusterComputeResource(cluster, context.soap_stub)

    task = cluster_mo.MoveInto([host_mo])
    pyVim.task.WaitForTask(task)
    print("Host '{}' ({}) moved into Cluster {} ({})".
          format(host, host_name, cluster, cluster_name))

    task = host_mo.ExitMaintenanceMode(TIMEOUT)
    pyVim.task.WaitForTask(task)
    print("Host '{}' ({}) out of maintenance mode".format(host, host_name))
Example #3
Source File: deploy_ovf_template.py From vsphere-automation-sdk-python with MIT License
def execute(self):
    # Find the cluster's resource pool moid
    cluster_obj = get_obj(self.servicemanager.content,
                          [vim.ClusterComputeResource], self.cluster_name)
    assert cluster_obj is not None
    print("Cluster Moref: {0}".format(cluster_obj))

    deployment_target = LibraryItem.DeploymentTarget(
        resource_pool_id=cluster_obj.resourcePool._GetMoId())

    lib_item_id = self.helper.get_item_id_by_name(self.lib_item_name)
    assert lib_item_id
    ovf_summary = self.client.ovf_lib_item_service.filter(
        ovf_library_item_id=lib_item_id, target=deployment_target)
    print('Found an OVF template: {0} to deploy.'.format(ovf_summary.name))

    # Deploy the ovf template
    self.deploy_ovf_template(lib_item_id, ovf_summary, deployment_target)
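A note on a helper used throughout: many of the snippets on this page call get_obj(...), which is defined elsewhere in their respective repositories and so never appears here. A minimal sketch of what such a helper typically looks like, modeled on the common pyvmomi-community-samples pattern, is shown below; the optional folder parameter is an assumption, and exact signatures vary from project to project (compare Examples #22, #25, and #28, which pass a container or folder argument).

def get_obj(content, vimtype, name, folder=None):
    """Return the first managed object of the given type whose name
    matches `name`, or None. `folder` defaults to the root folder."""
    obj = None
    container = content.viewManager.CreateContainerView(
        folder or content.rootFolder, vimtype, True)
    for view_obj in container.view:
        if view_obj.name == name:
            obj = view_obj
            break
    container.Destroy()
    return obj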
Example #4
Source File: virtualcenter.py From wrapanapi with MIT License
def _get_cluster_compute_resource(self, resource_name=None):
    """
    Returns a Compute Cluster Resource managed object.

    If a name is specified, a vim.ClusterComputeResource object is
    returned for that specific resource. If no name is specified, the
    method checks whether a default resource is configured and returns
    its object. Finally, if there is no name or default specified, it
    queries the VMware provider and picks the first object returned in
    the list.

    Args:
        resource_name (string): The name of the Cluster Compute Resource.
            If None, the first one found will be picked.
    Returns:
        pyVmomi.vim.ClusterComputeResource: The managed object of the
            cluster compute resource.
    """
    if resource_name is not None:
        return self.system.get_obj(vim.ClusterComputeResource, resource_name)
    elif self.system.default_cluster_compute_resource is not None:
        return self.system.get_obj(vim.ClusterComputeResource,
                                   self.system.default_cluster_compute_resource)
    else:
        return self.system.get_obj_list(vim.ClusterComputeResource)[0]
Example #5
Source File: run.py From vcenter-netbox-sync with Apache License 2.0
def create_view(self, vc_obj_type):
    """
    Create a view scoped to the vCenter object type desired.

    This should be called before collecting data about vCenter object types.
    :param vc_obj_type: vCenter object type to extract, must be key in vc_obj_views
    :type vc_obj_type: str
    """
    # Mapping of object type keywords to view types
    vc_obj_views = {
        "datacenters": vim.Datacenter,
        "clusters": vim.ClusterComputeResource,
        "hosts": vim.HostSystem,
        "virtual_machines": vim.VirtualMachine,
    }
    # Ensure an active vCenter session exists
    if not self.vc_session:
        log.info("No existing vCenter session found.")
        self.authenticate()
    return self.vc_session.viewManager.CreateContainerView(
        self.vc_session.rootFolder,     # View starting point
        [vc_obj_views[vc_obj_type]],    # Object types to look for
        True                            # Should we recursively look into view
    )
Example #6
Source File: create_dvs_and_dvport_group.py From vmware-pyvmomi-examples with Apache License 2.0
def main():
    si = None
    try:
        print("Trying to connect to VCENTER SERVER . . .")
        si = connect.Connect(inputs['vcenter_ip'], 443,
                             inputs['vcenter_user'], inputs['vcenter_password'])
    except IOError:
        pass
    atexit.register(Disconnect, si)
    print("Connected to VCENTER SERVER !")

    content = si.RetrieveContent()
    datacenter = get_obj(content, [vim.Datacenter], inputs['datacenter'])
    cluster = get_obj(content, [vim.ClusterComputeResource], inputs['cluster'])
    network_folder = datacenter.networkFolder

    # Create DV Switch
    dv_switch = create_dvSwitch(si, content, network_folder, cluster)

    # Add port group to this switch
    add_dvPort_group(si, dv_switch)
Example #7
Source File: vsphere_class.py From ADLES with Apache License 2.0
def get_clusters(self):
    """
    Get all the clusters associated with the vCenter server.

    :return: All clusters associated with the vCenter server
    :rtype: list(vim.ClusterComputeResource)
    """
    return self.get_objs(self.content.rootFolder, [vim.ClusterComputeResource])
Example #8
Source File: test_api_rest.py From integrations-core with BSD 3-Clause "New" or "Revised" License
def test_get_resource_tags(realtime_instance):
    config = VSphereConfig(realtime_instance, logger)
    mock_api = VSphereRestAPI(config, log=logger)
    mock_mors = [MagicMock(spec=vim.VirtualMachine, _moId="foo")]

    resource_tags = mock_api.get_resource_tags_for_mors(mock_mors)

    expected_resource_tags = {
        vim.HostSystem: {'10.0.0.104-1': ['my_cat_name_2:my_tag_name_2']},
        vim.VirtualMachine: {'VM4-4-1': ['my_cat_name_1:my_tag_name_1',
                                         'my_cat_name_2:my_tag_name_2']},
        vim.Datacenter: {},
        vim.Datastore: {'NFS-Share-1': ['my_cat_name_2:my_tag_name_2']},
        vim.ClusterComputeResource: {},
    }
    assert expected_resource_tags == resource_tags
Example #9
Source File: vsphere.py From integrations-core with BSD 3-Clause "New" or "Revised" License
def make_batch(
    self,
    mors,  # type: Iterable[vim.ManagedEntity]
    metric_ids,  # type: List[vim.PerformanceManager.MetricId]
    resource_type,  # type: Type[vim.ManagedEntity]
):
    # type: (...) -> Generator[MorBatch, None, None]
    """Iterates over mors and generates batches with a fixed number of metrics to query.
    Querying multiple resource types in the same call is error prone if we query a cluster metric.
    Indeed, cluster metrics result in an unpredictable number of internal metric queries which all
    count towards max_query_metrics. Therefore, collecting even a single cluster metric can often
    make the whole call fail. That's why we should never batch cluster metrics with anything else.
    """
    # Safeguard, let's avoid collecting multiple resources in the same call
    mors_filtered = [m for m in mors if isinstance(m, resource_type)]  # type: List[vim.ManagedEntity]

    if resource_type == vim.ClusterComputeResource:
        # Cluster metrics are unpredictable and a single call can max out the limit.
        # Always collect them one by one.
        max_batch_size = 1  # type: float
    elif resource_type in REALTIME_RESOURCES or self.config.max_historical_metrics < 0:
        # Queries are not limited by vCenter
        max_batch_size = self.config.metrics_per_query
    else:
        # Collection is limited by the value of `max_query_metrics`
        if self.config.metrics_per_query < 0:
            max_batch_size = self.config.max_historical_metrics
        else:
            max_batch_size = min(self.config.metrics_per_query, self.config.max_historical_metrics)

    batch = defaultdict(list)  # type: MorBatch
    batch_size = 0
    for m in mors_filtered:
        for metric_id in metric_ids:
            if batch_size == max_batch_size:
                yield batch
                batch = defaultdict(list)
                batch_size = 0
            batch[m].append(metric_id)
            batch_size += 1
    # Do not yield an empty batch
    if batch:
        yield batch
Example #10
Source File: utils.py From integrations-core with BSD 3-Clause "New" or "Revised" License
def get_parent_tags_recursively(mor, infrastructure_data):
    # type: (vim.ManagedEntity, InfrastructureData) -> List[str]
    """Go up the resources hierarchy from the given mor. Note that a host running a VM is not
    considered to be a parent of that VM.
    rootFolder(vim.Folder):
      - vm(vim.Folder):
          VM1-1
          VM1-2
      - host(vim.Folder):
          HOST1
          HOST2
    """
    mor_props = infrastructure_data[mor]
    parent = mor_props.get('parent')
    if parent:
        tags = []
        parent_props = infrastructure_data.get(parent, {})
        parent_name = to_string(parent_props.get('name', 'unknown'))
        if isinstance(parent, vim.HostSystem):
            tags.append('vsphere_host:{}'.format(parent_name))
        elif isinstance(parent, vim.Folder):
            tags.append('vsphere_folder:{}'.format(parent_name))
        elif isinstance(parent, vim.ComputeResource):
            if isinstance(parent, vim.ClusterComputeResource):
                tags.append('vsphere_cluster:{}'.format(parent_name))
            tags.append('vsphere_compute:{}'.format(parent_name))
        elif isinstance(parent, vim.Datacenter):
            tags.append('vsphere_datacenter:{}'.format(parent_name))
        elif isinstance(parent, vim.Datastore):
            tags.append('vsphere_datastore:{}'.format(parent_name))
        parent_tags = get_parent_tags_recursively(parent, infrastructure_data)
        parent_tags.extend(tags)
        return parent_tags
    return []
Example #11
Source File: vmware_exporter.py From vmware_exporter with BSD 3-Clause "New" or "Revised" License
def host_labels(self):

    def _collect(node, level=1, dc=None, folder=None):
        inventory = {}
        if isinstance(node, vim.Folder) and not isinstance(node, vim.StoragePod):
            logging.debug("[Folder    ] {level} {name}".format(level=('-' * level).ljust(7), name=node.name))
            for child in node.childEntity:
                inventory.update(_collect(child, level + 1, dc))
        elif isinstance(node, vim.Datacenter):
            logging.debug("[Datacenter] {level} {name}".format(level=('-' * level).ljust(7), name=node.name))
            inventory.update(_collect(node.hostFolder, level + 1, node.name))
        elif isinstance(node, vim.ComputeResource):
            logging.debug("[ComputeRes] {level} {name}".format(level=('-' * level).ljust(7), name=node.name))
            for host in node.host:
                inventory.update(_collect(host, level + 1, dc, node))
        elif isinstance(node, vim.HostSystem):
            logging.debug("[HostSystem] {level} {name}".format(level=('-' * level).ljust(7), name=node.name))
            inventory[node._moId] = [
                node.summary.config.name.rstrip('.'),
                dc,
                folder.name if isinstance(folder, vim.ClusterComputeResource) else ''
            ]
        else:
            logging.debug("[?         ] {level} {node}".format(level=('-' * level).ljust(7), node=node))
        return inventory

    labels = {}
    dcs = yield self.datacenter_inventory
    for dc in dcs:
        result = yield threads.deferToThread(lambda: _collect(dc))
        labels.update(result)
    return labels
Example #12
Source File: vmware-v2.1.py From Trillian with Apache License 2.0
def find_cluster_by_name(content, cluster_name, datacenter=None):
    if datacenter:
        folder = datacenter.hostFolder
    else:
        folder = content.rootFolder

    clusters = get_all_objs(content, [vim.ClusterComputeResource], folder)
    for cluster in clusters:
        if cluster.name == cluster_name:
            return cluster
    return None
Example #13
Source File: vsphere.py From ocs-ci with MIT License
def get_cluster(self, name, dc):
    """
    Gets the cluster

    Args:
        name (str): Cluster name
        dc (str): Datacenter name

    Returns:
        vim.ClusterComputeResource: Cluster instance
    """
    for cluster in self.get_dc(dc).hostFolder.childEntity:
        if cluster.name == name:
            return cluster
Example #14
Source File: get_vcenter_id.py From ansible-module-chaperone with Apache License 2.0
def core(module):
    vim_type = module.params['vcenter_vim_type']
    vcenter_object_name = module.params['vcenter_object_name']

    vim_rec_type = {
        'cluster': vim.ClusterComputeResource,
        'datacenter': vim.Datacenter,
        'datastore': vim.Datastore,
        'vds': vim.DistributedVirtualSwitch,
        'dvs-port': vim.Network,
        'vm': vim.VirtualMachine
    }

    try:
        vimtype = vim_rec_type[vim_type]
    except KeyError:
        module.fail_json(msg="Please specify valid vim type: "
                             "cluster, datacenter, datastore, vds, dvs-port, vm")

    si = si_connect(module)
    vcenter_id = get_id(module, si, [vimtype], vcenter_object_name, False, True)
    return False, vcenter_id
Example #15
Source File: vcin_vdt_configure_from_vsphere.py From vspk-examples with BSD 3-Clause "New" or "Revised" License
def handle_vdt_datacenter(logger, nc, vc, nuage_vcenter, vc_dc, nc_dc_list, vcenter_name,
                          all_clusters, all_hosts, clusters, hosts, hosts_list, hv_username,
                          hv_password, hv_management_network, hv_data_network, hv_vm_network,
                          hv_mc_network, host_configure_agent, allow_fqdn):
    # Checking if the Datacenter exists in the Nuage vCenter Deployment Tool
    logger.debug('Checking vCenter Datacenter %s in Nuage vCenter Deployment Tool' % vc_dc.name)
    active_nc_dc = None
    for nc_dc in nc_dc_list:
        if vc_dc.name == nc_dc.name:
            active_nc_dc = nc_dc
            logger.debug('Found Datacenter %s in Nuage vCenter Deployment Tool' % vc_dc.name)
            break

    # If the Datacenter does not exist in Nuage vCenter Deployment Tool, create it
    if not active_nc_dc:
        logger.debug('Datacenter %s not found in the vCenter %s in the Nuage vCenter Deployment '
                     'Tool, creating' % (vc_dc.name, vcenter_name))
        active_nc_dc = vsdk.NUVCenterDataCenter(name=vc_dc.name)
        nuage_vcenter.create_child(active_nc_dc)
        logger.info('Created Datacenter %s from the vCenter %s in the Nuage vCenter Deployment '
                    'Tool' % (vc_dc.name, vcenter_name))

    # Getting clusters in the current vCenter Datacenter
    logger.debug('Gathering all Clusters from the vCenter Datacenter %s' % vc_dc.name)
    content = vc.content
    obj_view = content.viewManager.CreateContainerView(vc_dc, [vim.ClusterComputeResource], True)
    vc_cl_list = obj_view.view
    obj_view.Destroy()

    # Getting clusters in current Nuage Datacenter
    logger.debug('Gathering all Clusters from the Nuage Datacenter %s' % vc_dc.name)
    nc_cl_list = active_nc_dc.vcenter_clusters.get()

    for vc_cl in vc_cl_list:
        if all_clusters or vc_cl.name in clusters:
            logger.debug('vCenter Cluster %s is in list that has to be present in the Nuage '
                         'vCenter Deployment Tool, checking if it already exists.' % vc_cl.name)
            handle_vdt_cluster(logger=logger, nc=nc, vc=vc, vc_dc=vc_dc, vc_cl=vc_cl,
                               nuage_dc=active_nc_dc, nc_cl_list=nc_cl_list, all_hosts=all_hosts,
                               hosts=hosts, hosts_list=hosts_list, hv_username=hv_username,
                               hv_password=hv_password,
                               hv_management_network=hv_management_network,
                               hv_data_network=hv_data_network, hv_vm_network=hv_vm_network,
                               hv_mc_network=hv_mc_network,
                               host_configure_agent=host_configure_agent, allow_fqdn=allow_fqdn)
Example #16
Source File: vmware.py From skylight with GNU General Public License v3.0
def find_cluster_by_name(content, cluster_name, datacenter=None):
    if datacenter:
        folder = datacenter.hostFolder
    else:
        folder = content.rootFolder
    return find_object_by_name(content, cluster_name, [vim.ClusterComputeResource], folder=folder)
Example #17
Source File: vsphere_class.py From ADLES with Apache License 2.0
def get_cluster(self, cluster_name=None):
    """
    Finds and returns the named Cluster.

    :param str cluster_name: Name of the cluster
        [default: first cluster found in datacenter]
    :return: The cluster found
    :rtype: vim.ClusterComputeResource or None
    """
    return self.get_item(cluster_name, vim.ClusterComputeResource)
Example #18
Source File: virtualcenter.py From wrapanapi with MIT License
def list_cluster(self):
    return [str(h.name) for h in self.get_obj_list(vim.ClusterComputeResource)]
Example #19
Source File: virtualcenter.py From wrapanapi with MIT License
def _set_vm_relocate_spec(self, resource_pool, host, sparse, progress_callback,
                          deploy_on_ds_cluster):
    """Set properties for Virtual Machine Relocate Operation specification"""
    vm_reloc_spec = vim.VirtualMachineRelocateSpec()

    # Set resource pool
    if isinstance(resource_pool, vim.ResourcePool):
        vm_reloc_spec.pool = resource_pool
    elif isinstance(resource_pool, vim.ClusterComputeResource):
        vm_reloc_spec.pool = resource_pool.resourcePool
    else:
        if deploy_on_ds_cluster:
            vm_reloc_spec.pool = self._get_cluster_compute_resource(resource_pool).resourcePool
        else:
            vm_reloc_spec.pool = self._get_resource_pool(resource_pool)
    progress_callback("Picked resource pool `{}`".format(vm_reloc_spec.pool.name))

    # Target host for the VM; this could be None
    vm_reloc_spec.host = (host if isinstance(host, vim.HostSystem)
                          else self.system.get_obj(vim.HostSystem, host))
    if sparse:
        vm_reloc_spec.transform = vim.VirtualMachineRelocateTransformation().sparse
        progress_callback("Transformation has been set to sparse")
    else:
        vm_reloc_spec.transform = vim.VirtualMachineRelocateTransformation().flat
        progress_callback("Transformation has been set to flat")
    return vm_reloc_spec
Example #20
Source File: get_cluster_by_name.py From vsphere-automation-sdk-python with MIT License
def _execute(self):
    content = self.servicemanager.content
    cluster_obj = get_obj(content, [vim.ClusterComputeResource], self.cluster_name)
    if cluster_obj is not None:
        self.mo_id = cluster_obj._GetMoId()
        print('Cluster MoId: {0}'.format(self.mo_id))
    else:
        print('Cluster: {0} not found'.format(self.cluster_name))
Example #21
Source File: vim_utils.py From vsphere-automation-sdk-python with MIT License
def get_cluster_name_by_id(content, name):
    cluster_obj = get_obj(content, [vim.ClusterComputeResource], name)
    if cluster_obj is not None:
        mo_id = cluster_obj._GetMoId()
        print('Cluster MoId: {0}'.format(mo_id))
    else:
        print('Cluster: {0} not found'.format(name))
Example #22
Source File: vsphere.py From KubeOperator with Apache License 2.0
def list_zone(self, region):
    params = replace_params(self.vars)
    st = get_service_instance(params)
    content = st.RetrieveContent()
    container = content.rootFolder
    viewType = [vim.Datacenter]
    region = get_obj(content, viewType, container, region)
    zones = []
    for entity in region.hostFolder.childEntity:
        zone = {
            "storages": [],
            "networks": [],
            "cluster": [],
        }
        if isinstance(entity, vim.ClusterComputeResource):
            zone["cluster"] = entity.name
            zone["resourcePool"] = []
            zone["resourcePool"].append(entity.resourcePool.name)
            for rp in entity.resourcePool.resourcePool:
                zone["resourcePool"].append(rp.name)
            for network in entity.network:
                zone.get("networks").append(network.name)
            for datastore in entity.datastore:
                zone.get("storages").append({
                    "name": datastore.name,
                    "type": datastore.summary.type,
                    "multipleHostAccess": datastore.summary.multipleHostAccess
                })
            zones.append(zone)
    return zones
Example #23
Source File: vmware_guest2.py From skylight with GNU General Public License v3.0
def get_cluster(self, cluster):
    if cluster not in self.clusters:
        self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)
    return self.clusters[cluster]
Example #24
Source File: disable_HA_on_particular_VM.py From vmware-pyvmomi-examples with Apache License 2.0
def main():
    si = None
    try:
        print("Trying to connect to VCENTER SERVER . . .")
        # si = connect.Connect(args.host, int(args.port), args.user, args.password, service="hostd")
        si = connect.Connect(inputs['vcenter_ip'], 443,
                             inputs['vcenter_user'], inputs['vcenter_password'])
    except IOError:
        pass
    atexit.register(Disconnect, si)
    print("Connected to VCENTER SERVER %s" % inputs['vcenter_ip'])

    content = si.RetrieveContent()
    vm = get_obj(content, [vim.VirtualMachine], inputs['vm_name'])
    cluster = get_obj(content, [vim.ClusterComputeResource], inputs['cluster'])

    settings = []
    cluster_spec = vim.cluster.ConfigSpec()
    config_spec = vim.cluster.DasVmConfigSpec()
    config_spec.operation = vim.option.ArrayUpdateSpec.Operation.edit

    config_info = vim.cluster.DasVmConfigInfo()
    config_info.key = vm
    config_info.restartPriority = vim.cluster.DasVmConfigInfo.Priority.disabled

    vm_settings = vim.cluster.DasVmSettings()
    # vm_settings.isolationResponse = vim.cluster.DasVmSettings.IsolationResponse.shutdown
    vm_settings.restartPriority = vim.cluster.DasVmSettings.RestartPriority.disabled

    monitor = vim.cluster.VmToolsMonitoringSettings()
    monitor.vmMonitoring = vim.cluster.DasConfigInfo.VmMonitoringState.vmMonitoringDisabled
    monitor.clusterSettings = False
    vm_settings.vmToolsMonitoringSettings = monitor

    config_info.dasSettings = vm_settings
    config_spec.info = config_info
    settings.append(config_spec)
    cluster_spec.dasVmConfigSpec = settings

    print("Disabling HA for VM", vm.name)
    task = cluster.ReconfigureCluster_Task(cluster_spec, True)
    wait_for_task(task, si)
    print("Successfully reconfigured HA priority")
Example #25
Source File: linked_clone.py From pyvmomi-community-samples with Apache License 2.0
def main():
    args = get_args()
    urllib3.disable_warnings()
    si = None
    context = None
    if hasattr(ssl, 'SSLContext'):
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_NONE
    if context:
        # Python >= 2.7.9
        si = SmartConnect(host=args.host,
                          port=int(args.port),
                          user=args.user,
                          pwd=args.password,
                          sslContext=context)
    else:
        # Python >= 2.7.7
        si = SmartConnect(host=args.host,
                          port=int(args.port),
                          user=args.user,
                          pwd=args.password)
    atexit.register(Disconnect, si)
    print("Connected to vCenter Server")

    content = si.RetrieveContent()
    datacenter = get_obj(content, [vim.Datacenter], args.datacenter_name)
    if not datacenter:
        raise Exception("Couldn't find the Datacenter with the provided name "
                        "'{}'".format(args.datacenter_name))

    cluster = get_obj(content, [vim.ClusterComputeResource], args.cluster_name,
                      datacenter.hostFolder)
    if not cluster:
        raise Exception("Couldn't find the Cluster with the provided name "
                        "'{}'".format(args.cluster_name))

    host_obj = None
    for host in cluster.host:
        if host.name == args.host_name:
            host_obj = host
            break

    vm_folder = datacenter.vmFolder
    template = get_obj(content, [vim.VirtualMachine], args.template_name, vm_folder)
    if not template:
        raise Exception("Couldn't find the template with the provided name "
                        "'{}'".format(args.template_name))

    location = _get_relocation_spec(host_obj, cluster.resourcePool)
    _take_template_snapshot(si, template)
    _clone_vm(si, template, args.vm_name, vm_folder, location)
Example #26
Source File: update_esxi_advanced_settings.py From pyvmomi-community-samples with Apache License 2.0
def main(): """ Simple command-line program demonstrating how to update ESXi Advanced Settings """ args = get_args() try: service_instance = connect.SmartConnectNoSSL(host=args.host, user=args.user, pwd=args.password, port=int(args.port)) if not service_instance: print("Could not connect to the specified host using specified " "username and password") return -1 atexit.register(connect.Disconnect, service_instance) content = service_instance.RetrieveContent() cluster = get_obj(content, [vim.ClusterComputeResource], args.cluster_name) hosts = cluster.host for host in hosts: optionManager = host.configManager.advancedOption option = vim.option.OptionValue(key=args.key, value=long(args.value)) print("Updating %s on ESXi host %s " "with value of %s" % (args.key, host.name, args.value)) optionManager.UpdateOptions(changedValue=[option]) except vmodl.MethodFault as e: print("Caught vmodl fault : " + e.msg) return -1 except Exception as e: print("Caught exception : " + str(e)) return -1 return 0 # Start program
Example #27
Source File: 01_get_vmware_vm_info.py From blueprints with MIT License
def parse_service_instance(clustername, service_instance):
    '''
    :param service_instance:
    :return:
    '''
    content = service_instance.RetrieveContent()
    object_view = content.viewManager.CreateContainerView(content.rootFolder, [], True)
    vm_info_list = []
    vm_info_list.append(["virtual_machine_name", "virtual_machine_uuid", "virtual_machine_ip",
                         "num_cpu", "num_vcpus", "memory_size", "guest_family", "host_uuid",
                         "datastore", "power_state"])
    for obj in object_view.view:
        if isinstance(obj, vim.ComputeResource):
            if isinstance(obj, vim.ClusterComputeResource) and obj.name == clustername:
                # instance_name, instance_id, address, num_sockets, num_vcpus_per_socket,
                # memory_size_mib, guestFamily, host_uuid, datastore
                for h in obj.host:
                    nic = h.config.network.vnic[0].spec
                    esxi_config = h.summary.config
                    # host_ip = h.summary.config.name
                    # host_id = str(h).split(":")[1][:-1]
                    host_uuid = h.hardware.systemInfo.uuid
                    for vx in h.vm:
                        if vx.summary.config.template is False:
                            ## Check if guestFullName contains `Windows` to determine whether
                            ## the guest OS is Windows; otherwise we assume it is Linux.
                            os = "Windows" if "Windows" in vx.summary.config.guestFullName else "Linux"
                            power_state = "poweron" if vx.runtime.powerState == "poweredOn" else "poweroff"
                            ## We assume here that the guest OS datastore URL will always start
                            ## with `ds:///vmfs/volumes`
                            datastore = [d.info.url for d in vx.datastore
                                         if d.info.url.startswith("ds:///vmfs/volumes")]
                            vm_info = [vx.name, vx.config.instanceUuid, vx.summary.guest.ipAddress,
                                       vx.config.hardware.numCPU,
                                       vx.config.hardware.numCoresPerSocket,
                                       vx.summary.config.memorySizeMB, os, host_uuid,
                                       datastore[0], power_state]
                            vm_info_list.append(vm_info)
    object_view.Destroy()

    with open("{}.csv".format(clustername), 'w') as file:
        writer = csv.writer(file)
        writer.writerows(vm_info_list)
Example #28
Source File: vsphere.py From KubeOperator with Apache License 2.0
def create_image(self, zone):
    params = replace_params(self.vars)
    st = get_service_instance(params)
    content = st.RetrieveContent()
    container = content.rootFolder
    viewType = [vim.Folder]
    folder = get_obj(content, viewType, container, 'kubeoperator')
    dc = get_obj(content, [vim.Datacenter], container, zone.region.cloud_region)
    if not folder:
        dc.vmFolder.CreateFolder('kubeoperator')
        logger.info("create vm folder : {}".format('kubeoperator'))
        folder = get_obj(content, viewType, container, 'kubeoperator')

    viewType = [vim.VirtualMachine]
    vm = get_obj(content, viewType, folder, zone.region.image_name)
    ds = get_obj(content, [vim.Datastore], container, zone.vars['vc_storage'])
    cluster = get_obj(content, [vim.ClusterComputeResource], container, zone.vars['vc_cluster'])
    if not vm:
        manager = st.content.ovfManager
        spec_params = vim.OvfManager.CreateImportSpecParams()
        ovf_path = zone.region.image_ovf_path
        vmdk_path = zone.region.image_vmdk_path
        ovfd = get_ovf_descriptor(ovf_path)
        resource_pool = cluster.resourcePool
        import_spec = manager.CreateImportSpec(ovfd, resource_pool, ds, spec_params)
        lease = resource_pool.ImportVApp(import_spec.importSpec, folder)
        while True:
            if lease.state == vim.HttpNfcLease.State.ready:
                url = lease.info.deviceUrl[0].url.replace('*', self.vars['vc_host'])
                keepalive_thread = Thread(target=keep_lease_alive, args=(lease,))
                keepalive_thread.start()
                curl_cmd = (
                    "curl -Ss -X POST --insecure -T %s -H 'Content-Type: "
                    "application/x-vnd.vmware-streamVmdk' %s" % (vmdk_path, url))
                os.system(curl_cmd)
                lease.HttpNfcLeaseComplete()
                keepalive_thread.join()
                vm = get_obj(content, [vim.VirtualMachine], folder, zone.region.image_name)
                vm.MarkAsTemplate()
                break
            elif lease.state == vim.HttpNfcLease.State.error:
                print("Lease error: " + str(lease.error))
                break