io.fabric8.kubernetes.api.model.NodeSelectorRequirementBuilder Java Examples
The following examples show how to use
io.fabric8.kubernetes.api.model.NodeSelectorRequirementBuilder.
Each example is drawn from an open-source project; the project and source file are noted above each example.
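As a primer before the project examples, here is a minimal, self-contained sketch of the builder on its own. It assumes the fabric8 kubernetes-model dependency is on the classpath; the "disktype" key and "ssd" value are made-up illustrations, not taken from any of the projects below.

import io.fabric8.kubernetes.api.model.NodeSelectorRequirement;
import io.fabric8.kubernetes.api.model.NodeSelectorRequirementBuilder;

public class NodeSelectorRequirementSketch {
    public static void main(String[] args) {
        // Build one requirement: "only nodes whose 'disktype' label has the value 'ssd'".
        // The key and value are hypothetical; the operator must be one of the values
        // the Kubernetes API accepts: In, NotIn, Exists, DoesNotExist, Gt, Lt.
        NodeSelectorRequirement requirement = new NodeSelectorRequirementBuilder()
            .withKey("disktype")
            .withOperator("In")
            .withValues("ssd")
            .build();

        System.out.println(requirement);
    }
}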
Example #1
Source File: KubernetesAppDeployerTests.java From spring-cloud-deployer-kubernetes with Apache License 2.0
@Test
public void testNodeAffinityGlobalProperty() {
    AppDefinition definition = new AppDefinition("app-test", null);
    AppDeploymentRequest appDeploymentRequest = new AppDeploymentRequest(definition, getResource(), null);

    KubernetesDeployerProperties kubernetesDeployerProperties = new KubernetesDeployerProperties();

    NodeSelectorTerm nodeSelectorTerm = new NodeSelectorTerm();
    nodeSelectorTerm.setMatchExpressions(Arrays.asList(new NodeSelectorRequirementBuilder()
        .withKey("kubernetes.io/e2e-az-name")
        .withOperator("In")
        .withValues("e2e-az1", "e2e-az2")
        .build()));

    NodeSelectorTerm nodeSelectorTerm2 = new NodeSelectorTerm();
    nodeSelectorTerm2.setMatchExpressions(Arrays.asList(new NodeSelectorRequirementBuilder()
        .withKey("another-node-label-key")
        .withOperator("In")
        .withValues("another-node-label-value2")
        .build()));

    PreferredSchedulingTerm preferredSchedulingTerm = new PreferredSchedulingTerm(nodeSelectorTerm2, 1);

    NodeAffinity nodeAffinity = new AffinityBuilder()
        .withNewNodeAffinity()
        .withNewRequiredDuringSchedulingIgnoredDuringExecution()
        .withNodeSelectorTerms(nodeSelectorTerm)
        .endRequiredDuringSchedulingIgnoredDuringExecution()
        .withPreferredDuringSchedulingIgnoredDuringExecution(preferredSchedulingTerm)
        .endNodeAffinity()
        .buildNodeAffinity();

    kubernetesDeployerProperties.setNodeAffinity(nodeAffinity);

    deployer = new KubernetesAppDeployer(kubernetesDeployerProperties, null);
    PodSpec podSpec = deployer.createPodSpec(appDeploymentRequest);

    NodeAffinity nodeAffinityTest = podSpec.getAffinity().getNodeAffinity();
    assertNotNull("Node affinity should not be null", nodeAffinityTest);
    assertNotNull("RequiredDuringSchedulingIgnoredDuringExecution should not be null",
        nodeAffinityTest.getRequiredDuringSchedulingIgnoredDuringExecution());
    assertEquals("PreferredDuringSchedulingIgnoredDuringExecution should have one element", 1,
        nodeAffinityTest.getPreferredDuringSchedulingIgnoredDuringExecution().size());
}
Example #2
Source File: PersistentVolumeTest.java From kubernetes-client with Apache License 2.0
@Test
public void testBuild() {
    PersistentVolume persistentVolume = new PersistentVolumeBuilder()
        .withNewMetadata().withName("persistentvolume").endMetadata()
        .withNewSpec()
        .addToCapacity(Collections.singletonMap("storage", new Quantity("500Gi")))
        .withAccessModes("ReadWriteOnce")
        .withPersistentVolumeReclaimPolicy("Retain")
        .withStorageClassName("local-storage")
        .withNewLocal()
        .withPath("/mnt/disks/vol1")
        .endLocal()
        .withNewNodeAffinity()
        .withNewRequired()
        .addNewNodeSelectorTerm()
        .withMatchExpressions(Arrays.asList(new NodeSelectorRequirementBuilder()
            .withKey("kubernetes.io/hostname")
            .withOperator("In")
            .withValues("my-node")
            .build()))
        .endNodeSelectorTerm()
        .endRequired()
        .endNodeAffinity()
        .endSpec()
        .build();

    server.expect().withPath("/api/v1/persistentvolumes/persistentvolume")
        .andReturn(200, persistentVolume).once();

    KubernetesClient client = server.getClient();

    persistentVolume = client.persistentVolumes().withName("persistentvolume").get();
    assertNotNull(persistentVolume);
}
Example #3
Source File: KubernetesAppDeployerTests.java From spring-cloud-deployer-kubernetes with Apache License 2.0
@Test
public void testNodeAffinityPropertyOverrideGlobal() {
    Map<String, String> props = new HashMap<>();
    props.put("spring.cloud.deployer.kubernetes.affinity.nodeAffinity",
        "{ requiredDuringSchedulingIgnoredDuringExecution:" +
        "  { nodeSelectorTerms:" +
        "    [ { matchExpressions:" +
        "        [ { key: 'kubernetes.io/e2e-az-name', " +
        "            operator: 'In'," +
        "            values:" +
        "            [ 'e2e-az1', 'e2e-az2']}]}]}, " +
        "  preferredDuringSchedulingIgnoredDuringExecution:" +
        "  [ { weight: 1," +
        "      preference:" +
        "      { matchExpressions:" +
        "        [ { key: 'another-node-label-key'," +
        "            operator: 'In'," +
        "            values:" +
        "            [ 'another-node-label-value' ]}]}}]}");

    AppDefinition definition = new AppDefinition("app-test", null);
    AppDeploymentRequest appDeploymentRequest = new AppDeploymentRequest(definition, getResource(), props);

    KubernetesDeployerProperties kubernetesDeployerProperties = new KubernetesDeployerProperties();

    NodeSelectorTerm nodeSelectorTerm = new NodeSelectorTerm();
    nodeSelectorTerm.setMatchExpressions(Arrays.asList(new NodeSelectorRequirementBuilder()
        .withKey("kubernetes.io/e2e-az-name")
        .withOperator("In")
        .withValues("e2e-az1", "e2e-az2")
        .build()));

    NodeAffinity nodeAffinity = new AffinityBuilder()
        .withNewNodeAffinity()
        .withNewRequiredDuringSchedulingIgnoredDuringExecution()
        .withNodeSelectorTerms(nodeSelectorTerm)
        .endRequiredDuringSchedulingIgnoredDuringExecution()
        .endNodeAffinity()
        .buildNodeAffinity();

    kubernetesDeployerProperties.setNodeAffinity(nodeAffinity);

    deployer = new KubernetesAppDeployer(kubernetesDeployerProperties, null);
    PodSpec podSpec = deployer.createPodSpec(appDeploymentRequest);

    NodeAffinity nodeAffinityTest = podSpec.getAffinity().getNodeAffinity();
    assertNotNull("Node affinity should not be null", nodeAffinityTest);
    assertNotNull("RequiredDuringSchedulingIgnoredDuringExecution should not be null",
        nodeAffinityTest.getRequiredDuringSchedulingIgnoredDuringExecution());
    assertEquals("PreferredDuringSchedulingIgnoredDuringExecution should have one element", 1,
        nodeAffinityTest.getPreferredDuringSchedulingIgnoredDuringExecution().size());
}
Example #4
Source File: KafkaCluster.java From strimzi-kafka-operator with Apache License 2.0
/**
 * Returns a combined affinity: Adding the affinity needed for the "kafka-rack" to the {@link #getUserAffinity()}.
 */
@Override
protected Affinity getMergedAffinity() {
    Affinity userAffinity = getUserAffinity();
    AffinityBuilder builder = new AffinityBuilder(userAffinity == null ? new Affinity() : userAffinity);
    if (rack != null) {
        // If there's a rack config, we need to add a podAntiAffinity to spread the brokers among the racks
        builder = builder
            .editOrNewPodAntiAffinity()
                .addNewPreferredDuringSchedulingIgnoredDuringExecution()
                    .withWeight(100)
                    .withNewPodAffinityTerm()
                        .withTopologyKey(rack.getTopologyKey())
                        .withNewLabelSelector()
                            .addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, cluster)
                            .addToMatchLabels(Labels.STRIMZI_NAME_LABEL, name)
                        .endLabelSelector()
                    .endPodAffinityTerm()
                .endPreferredDuringSchedulingIgnoredDuringExecution()
            .endPodAntiAffinity();

        // We also need to add node affinity to make sure the pods are scheduled only on nodes with the rack label
        NodeSelectorRequirement selector = new NodeSelectorRequirementBuilder()
            .withNewOperator("Exists")
            .withNewKey(rack.getTopologyKey())
            .build();

        if (userAffinity != null
                && userAffinity.getNodeAffinity() != null
                && userAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution() != null
                && userAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms() != null) {
            // User has specified some Node Selector Terms => we should enhance them
            List<NodeSelectorTerm> oldTerms = userAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms();
            List<NodeSelectorTerm> enhancedTerms = new ArrayList<>(oldTerms.size());

            for (NodeSelectorTerm term : oldTerms) {
                NodeSelectorTerm enhancedTerm = new NodeSelectorTermBuilder(term)
                    .addToMatchExpressions(selector)
                    .build();
                enhancedTerms.add(enhancedTerm);
            }

            builder = builder
                .editOrNewNodeAffinity()
                    .withNewRequiredDuringSchedulingIgnoredDuringExecution()
                        .withNodeSelectorTerms(enhancedTerms)
                    .endRequiredDuringSchedulingIgnoredDuringExecution()
                .endNodeAffinity();
        } else {
            // User has not specified any selector terms => we add our own
            builder = builder
                .editOrNewNodeAffinity()
                    .editOrNewRequiredDuringSchedulingIgnoredDuringExecution()
                        .addNewNodeSelectorTerm()
                            .withMatchExpressions(selector)
                        .endNodeSelectorTerm()
                    .endRequiredDuringSchedulingIgnoredDuringExecution()
                .endNodeAffinity();
        }
    }

    return builder.build();
}
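The merge above leans on a fabric8 builder feature worth calling out: passing an existing object to its builder's copy constructor clones it, so the rack requirement can be appended without mutating the user-supplied term. A minimal sketch of that pattern follows, assuming the same fabric8 model imports as the example above; the "user-label" and "topology.kubernetes.io/zone" keys are hypothetical, not taken from the source.

// Hypothetical user-supplied term with one existing expression.
NodeSelectorTerm userTerm = new NodeSelectorTermBuilder()
    .addToMatchExpressions(new NodeSelectorRequirementBuilder()
        .withKey("user-label")
        .withOperator("In")
        .withValues("a", "b")
        .build())
    .build();

// The extra requirement to merge in, mirroring the "Exists" selector built above.
NodeSelectorRequirement rackRequirement = new NodeSelectorRequirementBuilder()
    .withKey("topology.kubernetes.io/zone") // illustrative rack topology key
    .withOperator("Exists")
    .build();

// Copy-construct from the existing term, then append; userTerm itself is left untouched.
NodeSelectorTerm enhanced = new NodeSelectorTermBuilder(userTerm)
    .addToMatchExpressions(rackRequirement)
    .build();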
Example #5
Source File: PlanUtils.java From enmasse with Apache License 2.0
public static PodTemplateSpec createTemplateSpec(Map<String, String> labels, String nodeAffinityValue, String tolerationKey) {
    PodTemplateSpecBuilder builder = new PodTemplateSpecBuilder();
    if (labels != null) {
        builder.editOrNewMetadata()
            .withLabels(labels)
            .endMetadata();
    }

    if (nodeAffinityValue != null) {
        builder.editOrNewSpec()
            .editOrNewAffinity()
            .editOrNewNodeAffinity()
            .addToPreferredDuringSchedulingIgnoredDuringExecution(new PreferredSchedulingTermBuilder()
                .withWeight(1)
                .withNewPreference()
                .addToMatchExpressions(new NodeSelectorRequirementBuilder()
                    .withKey("node-label-key")
                    .withOperator("In")
                    .addToValues(nodeAffinityValue)
                    .build())
                .endPreference()
                .build())
            .endNodeAffinity()
            .endAffinity()
            .endSpec();
    }

    if (tolerationKey != null) {
        builder.editOrNewSpec()
            .addNewToleration()
            .withKey(tolerationKey)
            .withOperator("Exists")
            .withEffect("NoSchedule")
            .endToleration()
            .endSpec();
    }

    /* TODO: Not always supported by cluster
    if (priorityClassName != null) {
        builder.editOrNewSpec()
            .withPriorityClassName(priorityClassName)
            .endSpec();
    }*/

    return builder.build();
}
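For orientation, a hypothetical call to the helper above; all three argument values are invented, and any null argument simply skips its corresponding section of the template spec.

// Hypothetical example values, matching the signature shown above.
PodTemplateSpec templateSpec = PlanUtils.createTemplateSpec(
        Collections.singletonMap("app", "broker"), // labels on the pod template
        "zone-a",                                  // preferred value of the "node-label-key" node label
        "dedicated");                              // tolerate NoSchedule taints with this key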
Example #6
Source File: PersistentVolumeExample.java From kubernetes-client with Apache License 2.0
public static void main(String[] args) throws InterruptedException {
    String master = "https://192.168.99.100:8443/";
    if (args.length == 1) {
        master = args[0];
    }

    log("Using master with url ", master);
    Config config = new ConfigBuilder().withMasterUrl(master).build();
    try (final KubernetesClient client = new DefaultKubernetesClient(config)) {
        log("Creating persistent volume object");
        PersistentVolume pv = new PersistentVolumeBuilder()
            .withNewMetadata().withName("example-local-pv").endMetadata()
            .withNewSpec()
            .addToCapacity(Collections.singletonMap("storage", new Quantity("500Gi")))
            .withAccessModes("ReadWriteOnce")
            .withPersistentVolumeReclaimPolicy("Retain")
            .withStorageClassName("local-storage")
            .withNewLocal()
            .withPath("/mnt/disks/vol1")
            .endLocal()
            .withNewNodeAffinity()
            .withNewRequired()
            .addNewNodeSelectorTerm()
            .withMatchExpressions(Arrays.asList(new NodeSelectorRequirementBuilder()
                .withKey("kubernetes.io/hostname")
                .withOperator("In")
                .withValues("my-node")
                .build()))
            .endNodeSelectorTerm()
            .endRequired()
            .endNodeAffinity()
            .endSpec()
            .build();

        client.persistentVolumes().create(pv);
        log("Successfully created Persistent Volume object");
    } catch (KubernetesClientException e) {
        log("Could not create resource", e.getMessage());
    }
}
Example #7
Source File: PersistentVolumeClaimExample.java From kubernetes-client with Apache License 2.0
public static void main(String[] args) {
    String master = "https://localhost:8443";
    String namespace = "default";
    String storageClassName = "my-local-storage";
    if (args.length == 1) {
        master = args[0];
    }

    log("Using master with url ", master);
    Config config = new ConfigBuilder().withMasterUrl(master).build();
    try (final KubernetesClient client = new DefaultKubernetesClient(config)) {
        try {
            StorageClass storageClass = client.storage().storageClasses()
                .load(PersistentVolumeClaimExample.class.getResourceAsStream("/test-storage.yml")).get();
            client.storage().storageClasses().create(storageClass);

            log("Creating PersistentVolume object");
            PersistentVolume pv = new PersistentVolumeBuilder()
                .withNewMetadata().withName("test-local-pv").endMetadata()
                .withNewSpec()
                .addToCapacity(Collections.singletonMap("storage", new Quantity("500Gi")))
                .withAccessModes("ReadWriteOnce")
                .withPersistentVolumeReclaimPolicy("Retain")
                .withStorageClassName(storageClassName)
                .withNewLocal()
                .withPath("/mnt/disks/vol1")
                .endLocal()
                .withNewNodeAffinity()
                .withNewRequired()
                .addNewNodeSelectorTerm()
                .withMatchExpressions(Arrays.asList(new NodeSelectorRequirementBuilder()
                    .withKey("kubernetes.io/hostname")
                    .withOperator("In")
                    .withValues("my-node")
                    .build()))
                .endNodeSelectorTerm()
                .endRequired()
                .endNodeAffinity()
                .endSpec()
                .build();
            client.persistentVolumes().create(pv);
            log("Successfully created PersistentVolume object");

            log("Creating PersistentVolumeClaim object");
            PersistentVolumeClaim persistentVolumeClaim = new PersistentVolumeClaimBuilder()
                .withNewMetadata().withName("test-pv-claim").withNamespace(namespace).endMetadata()
                .withNewSpec()
                .withStorageClassName(storageClassName)
                .withAccessModes("ReadWriteOnce")
                .withNewResources()
                .addToRequests("storage", new Quantity("500Gi"))
                .endResources()
                .endSpec()
                .build();
            client.persistentVolumeClaims().create(persistentVolumeClaim);
            log("Successfully created PersistentVolumeClaim object");

            log("Creating pod");
            Pod pod = client.pods().inNamespace(namespace)
                .load(PersistentVolumeClaimExample.class.getResourceAsStream("/test-pv-pod.yml")).get();
            client.pods().inNamespace(namespace).create(pod);
            log("Successfully created pod");
        } finally {
            // Clean up every resource created above: the claim, the volume, the pod, and the storage class
            client.persistentVolumeClaims().inNamespace(namespace).withName("test-pv-claim").delete();
            client.persistentVolumes().withName("test-local-pv").delete();
            client.pods().inNamespace(namespace).withName("test-pv-pod").delete();
            client.storage().storageClasses().withName(storageClassName).delete();
        }
    } catch (KubernetesClientException e) {
        log("Could not create resource", e.getMessage());
    }
}