io.fabric8.openshift.api.model.DeploymentConfig Java Examples
The following examples show how to use io.fabric8.openshift.api.model.DeploymentConfig. Each snippet is taken from an open-source project; the source file, project, and license are noted above it.
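Before the project examples, here is a minimal, self-contained sketch of constructing a DeploymentConfig with the fabric8 builder API. It only uses builder calls that also appear in Examples #5 and #20 below; the resource name "my-app" and the label are illustrative placeholders, not values from any of the projects.

import java.util.Collections;
import java.util.Map;

import io.fabric8.openshift.api.model.DeploymentConfig;
import io.fabric8.openshift.api.model.DeploymentConfigBuilder;

public class DeploymentConfigSketch {
    public static void main(String[] args) {
        // Illustrative label; real projects derive labels from their own configuration
        Map<String, String> labels = Collections.singletonMap("app", "my-app");

        DeploymentConfig dc = new DeploymentConfigBuilder()
                .withNewMetadata()
                    .withName("my-app")        // placeholder resource name
                    .withLabels(labels)
                .endMetadata()
                .withNewSpec()
                    .withReplicas(1)           // desired replica count
                    .withSelector(labels)      // pods managed by this DeploymentConfig
                .endSpec()
                .build();

        System.out.println(dc.getMetadata().getName() + " -> replicas=" + dc.getSpec().getReplicas());
    }
}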
Example #1
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testGenerateDeploymentConfigWithTls() {
    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .editOrNewTls()
                    .addToTrustedCertificates(new CertSecretSourceBuilder().withSecretName("my-secret").withCertificate("cert.crt").build())
                    .addToTrustedCertificates(new CertSecretSourceBuilder().withSecretName("my-secret").withCertificate("new-cert.crt").build())
                    .addToTrustedCertificates(new CertSecretSourceBuilder().withSecretName("my-another-secret").withCertificate("another-cert.crt").build())
                .endTls()
            .endSpec()
            .build();
    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);
    DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null);

    assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(1).getName(), is("my-secret"));
    assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("my-another-secret"));

    List<Container> containers = dep.getSpec().getTemplate().getSpec().getContainers();

    assertThat(containers.get(0).getVolumeMounts().get(1).getMountPath(), is(KafkaConnectCluster.TLS_CERTS_BASE_VOLUME_MOUNT + "my-secret"));
    assertThat(containers.get(0).getVolumeMounts().get(2).getMountPath(), is(KafkaConnectCluster.TLS_CERTS_BASE_VOLUME_MOUNT + "my-another-secret"));

    assertThat(AbstractModel.containerEnvVars(containers.get(0)).get(KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_TRUSTED_CERTS),
            is("my-secret/cert.crt;my-secret/new-cert.crt;my-another-secret/another-cert.crt"));
    assertThat(AbstractModel.containerEnvVars(containers.get(0)).get(KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_TLS),
            is("true"));
}
Example #2
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testExternalConfigurationSecretEnvs() {
    ExternalConfigurationEnv env = new ExternalConfigurationEnvBuilder()
            .withName("MY_ENV_VAR")
            .withNewValueFrom()
                .withSecretKeyRef(new SecretKeySelectorBuilder().withName("my-secret").withKey("my-key").withOptional(false).build())
            .endValueFrom()
            .build();

    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .withNewExternalConfiguration()
                    .withEnv(env)
                .endExternalConfiguration()
            .endSpec()
            .build();

    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);

    // Check DeploymentConfig
    DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null);
    List<EnvVar> envs = dep.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv();
    List<EnvVar> selected = envs.stream().filter(var -> var.getName().equals("MY_ENV_VAR")).collect(Collectors.toList());
    assertThat(selected.size(), is(1));
    assertThat(selected.get(0).getName(), is("MY_ENV_VAR"));
    assertThat(selected.get(0).getValueFrom().getSecretKeyRef(), is(env.getValueFrom().getSecretKeyRef()));
}
Example #3
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testExternalConfigurationConfigEnvs() {
    ExternalConfigurationEnv env = new ExternalConfigurationEnvBuilder()
            .withName("MY_ENV_VAR")
            .withNewValueFrom()
                .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName("my-map").withKey("my-key").withOptional(false).build())
            .endValueFrom()
            .build();

    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .withNewExternalConfiguration()
                    .withEnv(env)
                .endExternalConfiguration()
            .endSpec()
            .build();

    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);

    // Check DeploymentConfig
    DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null);
    List<EnvVar> envs = dep.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv();
    List<EnvVar> selected = envs.stream().filter(var -> var.getName().equals("MY_ENV_VAR")).collect(Collectors.toList());
    assertThat(selected.size(), is(1));
    assertThat(selected.get(0).getName(), is("MY_ENV_VAR"));
    assertThat(selected.get(0).getValueFrom().getConfigMapKeyRef(), is(env.getValueFrom().getConfigMapKeyRef()));
}
Example #4
Source File: OpenShiftServiceImpl.java From syndesis with Apache License 2.0
@Override
public boolean isScaled(String name, int desiredMinimumReplicas, Map<String, String> labels) {
    List<DeploymentConfig> deploymentConfigs = getDeploymentsByLabel(labels);
    if (deploymentConfigs.isEmpty()) {
        return false;
    }

    DeploymentConfig dc = deploymentConfigs.get(0);
    int allReplicas = 0;
    int availableReplicas = 0;
    if (dc != null && dc.getStatus() != null) {
        DeploymentConfigStatus status = dc.getStatus();
        allReplicas = nullSafe(status.getReplicas());
        availableReplicas = nullSafe(status.getAvailableReplicas());
    }

    return desiredMinimumReplicas <= allReplicas
        && desiredMinimumReplicas <= availableReplicas;
}
Example #5
Source File: OpenshiftHandler.java From dekorate with Apache License 2.0
/**
 * Creates a {@link DeploymentConfig} for the {@link OpenshiftConfig}.
 * @param config The session.
 * @return The deployment config.
 */
public DeploymentConfig createDeploymentConfig(OpenshiftConfig config, ImageConfiguration imageConfig) {
    Map<String, String> labels = Labels.createLabels(config);

    return new DeploymentConfigBuilder()
        .withNewMetadata()
            .withName(config.getName())
            .withLabels(labels)
        .endMetadata()
        .withNewSpec()
            .withReplicas(1)
            .withTemplate(createPodTemplateSpec(config, imageConfig, labels))
            .withSelector(labels)
        .endSpec()
        .build();
}
Example #6
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testGenerateDeploymentWithTlsSameSecret() {
    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .editOrNewTls()
                    .addToTrustedCertificates(new CertSecretSourceBuilder().withSecretName("my-secret").withCertificate("cert.crt").build())
                .endTls()
                .withAuthentication(
                        new KafkaClientAuthenticationTlsBuilder()
                                .withNewCertificateAndKey()
                                    .withSecretName("my-secret")
                                    .withCertificate("user.crt")
                                    .withKey("user.key")
                                .endCertificateAndKey()
                                .build())
            .endSpec()
            .build();
    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);
    DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null);

    // 2 = 1 volume from logging/metrics + just 1 from above certs Secret
    assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().size(), is(2));
    assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(1).getName(), is("my-secret"));
}
Example #7
Source File: DeploymentConfigOperationsImpl.java From kubernetes-client with Apache License 2.0
private List<RollableScalableResource<ReplicaSet, DoneableReplicaSet>> doGetLog() {
    List<RollableScalableResource<ReplicaSet, DoneableReplicaSet>> rcs = new ArrayList<>();
    DeploymentConfig deploymentConfig = fromServer().get();
    String rcUid = deploymentConfig.getMetadata().getUid();

    ReplicaSetOperationsImpl rsOperations = new ReplicaSetOperationsImpl((RollingOperationContext) context);
    ReplicaSetList rcList = rsOperations.withLabels(deploymentConfig.getMetadata().getLabels()).list();

    for (ReplicaSet rs : rcList.getItems()) {
        OwnerReference ownerReference = KubernetesResourceUtil.getControllerUid(rs);
        if (ownerReference != null && ownerReference.getUid().equals(rcUid)) {
            rcs.add(rsOperations.withName(rs.getMetadata().getName()));
        }
    }
    return rcs;
}
Example #8
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testNoExternalConfigurationVolumes() {
    ExternalConfigurationVolumeSource volume = new ExternalConfigurationVolumeSourceBuilder()
            .withName("my-volume")
            .build();

    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .withNewExternalConfiguration()
                    .withVolumes(volume)
                .endExternalConfiguration()
            .endSpec()
            .build();

    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);

    // Check Deployment
    DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null);
    List<Volume> volumes = dep.getSpec().getTemplate().getSpec().getVolumes();
    List<Volume> selected = volumes.stream().filter(vol -> vol.getName().equals(KafkaConnectCluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList());
    assertThat(selected.size(), is(0));

    List<VolumeMount> volumeMounths = dep.getSpec().getTemplate().getSpec().getContainers().get(0).getVolumeMounts();
    List<VolumeMount> selectedVolumeMounths = volumeMounths.stream().filter(vol -> vol.getName().equals(KafkaConnectCluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList());
    assertThat(selectedVolumeMounths.size(), is(0));
}
Example #9
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testInvalidExternalConfigurationEnvs() {
    ExternalConfigurationEnv env = new ExternalConfigurationEnvBuilder()
            .withName("MY_ENV_VAR")
            .withNewValueFrom()
                .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder().withName("my-map").withKey("my-key").withOptional(false).build())
                .withSecretKeyRef(new SecretKeySelectorBuilder().withName("my-secret").withKey("my-key").withOptional(false).build())
            .endValueFrom()
            .build();

    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .withNewExternalConfiguration()
                    .withEnv(env)
                .endExternalConfiguration()
            .endSpec()
            .build();

    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);

    // Check Deployment
    DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null);
    List<EnvVar> envs = dep.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv();
    List<EnvVar> selected = envs.stream().filter(var -> var.getName().equals("MY_ENV_VAR")).collect(Collectors.toList());
    assertThat(selected.size(), is(0));
}
Example #10
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testNoExternalConfigurationEnvs() {
    ExternalConfigurationEnv env = new ExternalConfigurationEnvBuilder()
            .withName("MY_ENV_VAR")
            .withNewValueFrom()
            .endValueFrom()
            .build();

    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .withNewExternalConfiguration()
                    .withEnv(env)
                .endExternalConfiguration()
            .endSpec()
            .build();

    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);

    // Check Deployment
    DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null);
    List<EnvVar> envs = dep.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv();
    List<EnvVar> selected = envs.stream().filter(var -> var.getName().equals("MY_ENV_VAR")).collect(Collectors.toList());
    assertThat(selected.size(), is(0));
}
Example #11
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testGracePeriod() {
    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .withNewTemplate()
                    .withNewPod()
                        .withTerminationGracePeriodSeconds(123)
                    .endPod()
                .endTemplate()
            .endSpec()
            .build();
    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);

    DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null);
    assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(123)));
}
Example #12
Source File: HandlersTest.java From kubernetes-client with Apache License 2.0
@Test
void checkHandlers() {
    checkHandler(new BuildConfig(), new BuildConfigHandler());
    checkHandler(new Build(), new BuildHandler());
    checkHandler(new DeploymentConfig(), new DeploymentConfigHandler());
    checkHandler(new Group(), new GroupHandler());
    checkHandler(new Identity(), new IdentityHandler());
    checkHandler(new Image(), new ImageHandler());
    checkHandler(new ImageStream(), new ImageStreamHandler());
    checkHandler(new ImageStreamTag(), new ImageStreamTagHandler());
    checkHandler(new NetNamespace(), new NetNamespaceHandler());
    checkHandler(new OAuthAccessToken(), new OAuthAccessTokenHandler());
    checkHandler(new OAuthAuthorizeToken(), new OAuthAuthorizeTokenHandler());
    checkHandler(new OAuthClient(), new OAuthClientHandler());
    checkHandler(new Project(), new ProjectHandler());
    checkHandler(new Route(), new RouteHandler());
    checkHandler(new SecurityContextConstraints(), new SecurityContextConstraintsHandler());
    checkHandler(new User(), new UserHandler());
}
Example #13
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testImagePullSecrets() {
    LocalObjectReference secret1 = new LocalObjectReference("some-pull-secret");
    LocalObjectReference secret2 = new LocalObjectReference("some-other-pull-secret");

    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .withNewTemplate()
                    .withNewPod()
                        .withImagePullSecrets(secret1, secret2)
                    .endPod()
                .endTemplate()
            .endSpec()
            .build();
    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);

    DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null);
    assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2));
    assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret1), is(true));
    assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret2), is(true));
}
Example #14
Source File: OpenShiftServiceImplTest.java From syndesis with Apache License 2.0
@Test
public void shouldNotExposeUnexposedDeployments() {
    final DeploymentData deploymentData = new DeploymentData.Builder()
        .withExposure(EnumSet.noneOf(Exposure.class))
        .build();

    final String name = "unexposed";
    final DeploymentConfig expectedDeploymentConfig = baseDeploymentFor(name, deploymentData)
        .build();

    expectDeploymentOf(name, expectedDeploymentConfig);

    service.deploy(name, deploymentData);

    final List<Request> issuedRequests = gatherRequests();
    assertThat(issuedRequests).contains(Request.with("POST", "/apis/apps.openshift.io/v1/namespaces/test/deploymentconfigs", expectedDeploymentConfig));
    assertThat(issuedRequests).doesNotContain(Request.with("POST", "/apis/route.openshift.io/v1/namespaces/test/routes"));
    assertThat(issuedRequests).doesNotContain(Request.with("POST", "/api/v1/namespaces/test/services"));
}
Example #15
Source File: Readiness.java From kubernetes-client with Apache License 2.0
public static boolean isReady(HasMetadata item) {
    if (item instanceof Deployment) {
        return isDeploymentReady((Deployment) item);
    } else if (item instanceof ReplicaSet) {
        return isReplicaSetReady((ReplicaSet) item);
    } else if (item instanceof Pod) {
        return isPodReady((Pod) item);
    } else if (item instanceof DeploymentConfig) {
        return isDeploymentConfigReady((DeploymentConfig) item);
    } else if (item instanceof ReplicationController) {
        return isReplicationControllerReady((ReplicationController) item);
    } else if (item instanceof Endpoints) {
        return isEndpointsReady((Endpoints) item);
    } else if (item instanceof Node) {
        return isNodeReady((Node) item);
    } else if (item instanceof StatefulSet) {
        return isStatefulSetReady((StatefulSet) item);
    } else {
        throw new IllegalArgumentException("Item needs to be one of [Node, Deployment, ReplicaSet, StatefulSet, Pod, DeploymentConfig, ReplicationController], but was: ["
                + (item != null ? item.getKind() : "Unknown (null)") + "]");
    }
}
Example #16
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testResources() {
    Map<String, Quantity> requests = new HashMap<>(2);
    requests.put("cpu", new Quantity("250m"));
    requests.put("memory", new Quantity("512Mi"));

    Map<String, Quantity> limits = new HashMap<>(2);
    limits.put("cpu", new Quantity("500m"));
    limits.put("memory", new Quantity("1024Mi"));

    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .withResources(new ResourceRequirementsBuilder().withLimits(limits).withRequests(requests).build())
            .endSpec()
            .build();
    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);

    DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null);
    Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0);
    assertThat(cont.getResources().getLimits(), is(limits));
    assertThat(cont.getResources().getRequests(), is(requests));
}
Example #17
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testGenerateDeploymentWithOAuthWithAccessToken() {
    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .withAuthentication(
                        new KafkaClientAuthenticationOAuthBuilder()
                                .withNewAccessToken()
                                    .withSecretName("my-token-secret")
                                    .withKey("my-token-key")
                                .endAccessToken()
                                .build())
            .endSpec()
            .build();

    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);
    DeploymentConfig dep = kc.generateDeploymentConfig(emptyMap(), true, null, null);
    Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0);

    assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_SASL_MECHANISM.equals(var.getName())).findFirst().orElse(null).getValue(), is("oauth"));
    assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_OAUTH_ACCESS_TOKEN.equals(var.getName())).findFirst().orElse(null).getValueFrom().getSecretKeyRef().getName(), is("my-token-secret"));
    assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_OAUTH_ACCESS_TOKEN.equals(var.getName())).findFirst().orElse(null).getValueFrom().getSecretKeyRef().getKey(), is("my-token-key"));
    assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_OAUTH_CONFIG.equals(var.getName())).findFirst().orElse(null).getValue().isEmpty(), is(true));
}
Example #18
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testGenerateDeploymentWithOAuthWithRefreshToken() {
    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .withAuthentication(
                        new KafkaClientAuthenticationOAuthBuilder()
                                .withClientId("my-client-id")
                                .withTokenEndpointUri("http://my-oauth-server")
                                .withNewRefreshToken()
                                    .withSecretName("my-token-secret")
                                    .withKey("my-token-key")
                                .endRefreshToken()
                                .build())
            .endSpec()
            .build();

    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);
    DeploymentConfig dep = kc.generateDeploymentConfig(emptyMap(), true, null, null);
    Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0);

    assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_SASL_MECHANISM.equals(var.getName())).findFirst().orElse(null).getValue(), is("oauth"));
    assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_OAUTH_REFRESH_TOKEN.equals(var.getName())).findFirst().orElse(null).getValueFrom().getSecretKeyRef().getName(), is("my-token-secret"));
    assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_OAUTH_REFRESH_TOKEN.equals(var.getName())).findFirst().orElse(null).getValueFrom().getSecretKeyRef().getKey(), is("my-token-key"));
    assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_OAUTH_CONFIG.equals(var.getName())).findFirst().orElse(null).getValue().trim(),
            is(String.format("%s=\"%s\" %s=\"%s\"", ClientConfig.OAUTH_CLIENT_ID, "my-client-id", ClientConfig.OAUTH_TOKEN_ENDPOINT_URI, "http://my-oauth-server")));
}
Example #19
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testGenerateDeploymentWithOAuthWithClientSecret() {
    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .withAuthentication(
                        new KafkaClientAuthenticationOAuthBuilder()
                                .withClientId("my-client-id")
                                .withTokenEndpointUri("http://my-oauth-server")
                                .withNewClientSecret()
                                    .withSecretName("my-secret-secret")
                                    .withKey("my-secret-key")
                                .endClientSecret()
                                .build())
            .endSpec()
            .build();

    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);
    DeploymentConfig dep = kc.generateDeploymentConfig(emptyMap(), true, null, null);
    Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0);

    assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_SASL_MECHANISM.equals(var.getName())).findFirst().orElse(null).getValue(), is("oauth"));
    assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_OAUTH_CLIENT_SECRET.equals(var.getName())).findFirst().orElse(null).getValueFrom().getSecretKeyRef().getName(), is("my-secret-secret"));
    assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_OAUTH_CLIENT_SECRET.equals(var.getName())).findFirst().orElse(null).getValueFrom().getSecretKeyRef().getKey(), is("my-secret-key"));
    assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_OAUTH_CONFIG.equals(var.getName())).findFirst().orElse(null).getValue().trim(),
            is(String.format("%s=\"%s\" %s=\"%s\"", ClientConfig.OAUTH_CLIENT_ID, "my-client-id", ClientConfig.OAUTH_TOKEN_ENDPOINT_URI, "http://my-oauth-server")));
}
Example #20
Source File: DeploymentConfigTest.java From kubernetes-client with Apache License 2.0
@Test
public void testDeployingLatest() {
    server.expect().withPath("/apis/apps.openshift.io/v1/namespaces/test/deploymentconfigs/dc1")
        .andReturn(200, new DeploymentConfigBuilder().withNewMetadata().withName("dc1").endMetadata()
            .withNewStatus().withLatestVersion(1L).endStatus().build())
        .always();

    server.expect().patch().withPath("/apis/apps.openshift.io/v1/namespaces/test/deploymentconfigs/dc1")
        .andReturn(200, new DeploymentConfigBuilder().withNewMetadata().withName("dc1").endMetadata()
            .withNewStatus().withLatestVersion(2L).endStatus().build())
        .once();

    OpenShiftClient client = server.getOpenshiftClient();

    DeploymentConfig deploymentConfig = client.deploymentConfigs().withName("dc1").deployLatest();
    assertNotNull(deploymentConfig);
    assertEquals(new Long(2), deploymentConfig.getStatus().getLatestVersion());
}
Example #21
Source File: DeploymentConfigOperator.java From strimzi-kafka-operator with Apache License 2.0
/**
 * Check if a deployment configuration has been observed.
 *
 * @param namespace The namespace.
 * @param name The resource name.
 * @return Whether the deployment has been observed.
 */
private boolean isObserved(String namespace, String name) {
    DeploymentConfig dep = get(namespace, name);
    if (dep != null) {
        // Get the roll out status
        // => Sometimes it takes OCP some time before the generations are updated.
        //    So we need to check the conditions in addition to detect such situation.
        boolean rollOutNotStarting = true;
        DeploymentCondition progressing = getProgressingCondition(dep);
        if (progressing != null) {
            rollOutNotStarting = progressing.getReason() != null && !"Unknown".equals(progressing.getStatus());
        }

        return dep.getMetadata().getGeneration().equals(dep.getStatus().getObservedGeneration())
                && rollOutNotStarting;
    } else {
        return false;
    }
}
Example #22
Source File: BaseEnricher.java From jkube with Eclipse Public License 2.0
/**
 * This method just makes sure that the replica count provided in XML config
 * overrides the default option; and resource fragments are always given
 * topmost priority.
 *
 * @param builder kubernetes list builder containing objects
 * @param xmlResourceConfig xml resource config from plugin configuration
 * @param defaultValue default value
 * @return resolved replica count
 */
protected int getReplicaCount(KubernetesListBuilder builder, ResourceConfig xmlResourceConfig, int defaultValue) {
    if (xmlResourceConfig != null) {
        List<HasMetadata> items = builder.buildItems();
        for (HasMetadata item : items) {
            if (item instanceof Deployment && ((Deployment) item).getSpec().getReplicas() != null) {
                return ((Deployment) item).getSpec().getReplicas();
            }
            if (item instanceof DeploymentConfig && ((DeploymentConfig) item).getSpec().getReplicas() != null) {
                return ((DeploymentConfig) item).getSpec().getReplicas();
            }
        }
        return xmlResourceConfig.getReplicas() > 0 ? xmlResourceConfig.getReplicas() : defaultValue;
    }
    return defaultValue;
}
Example #23
Source File: DeploymentConfigIT.java From kubernetes-client with Apache License 2.0
@Test
public void update() {
    ReadyEntity<DeploymentConfig> deploymentConfigReady = new ReadyEntity<>(DeploymentConfig.class, client, "deploymentconfig1", currentNamespace);
    deploymentConfig1 = client.deploymentConfigs().inNamespace(currentNamespace).withName("deploymentconfig1").edit()
        .editSpec().withReplicas(3).endSpec().done();
    await().atMost(60, TimeUnit.SECONDS).until(deploymentConfigReady);
    assertThat(deploymentConfig1).isNotNull();
    assertEquals(3, deploymentConfig1.getSpec().getReplicas().intValue());
}
Example #24
Source File: DeploymentConfigOperator.java From strimzi-kafka-operator with Apache License 2.0
@Override
protected Integer currentScale(String namespace, String name) {
    DeploymentConfig deploymentConfig = get(namespace, name);
    if (deploymentConfig != null) {
        return deploymentConfig.getSpec().getReplicas();
    } else {
        return null;
    }
}
Example #25
Source File: KafkaConnectS2IClusterTest.java From strimzi-kafka-operator with Apache License 2.0
@Test
public void testExternalConfigurationInvalidVolumes() {
    ExternalConfigurationVolumeSource volume = new ExternalConfigurationVolumeSourceBuilder()
            .withName("my-volume")
            .withConfigMap(new ConfigMapVolumeSourceBuilder().withName("my-map").build())
            .withSecret(new SecretVolumeSourceBuilder().withSecretName("my-secret").build())
            .build();

    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
            .editSpec()
                .withNewExternalConfiguration()
                    .withVolumes(volume)
                .endExternalConfiguration()
            .endSpec()
            .build();

    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);

    // Check Deployment
    DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null);
    List<Volume> volumes = dep.getSpec().getTemplate().getSpec().getVolumes();
    List<Volume> selected = volumes.stream().filter(vol -> vol.getName().equals(KafkaConnectCluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList());
    assertThat(selected.size(), is(0));

    List<VolumeMount> volumeMounths = dep.getSpec().getTemplate().getSpec().getContainers().get(0).getVolumeMounts();
    List<VolumeMount> selectedVolumeMounths = volumeMounths.stream().filter(vol -> vol.getName().equals(KafkaConnectCluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList());
    assertThat(selectedVolumeMounths.size(), is(0));
}
Example #26
Source File: DeploymentConfigOperationsImpl.java From kubernetes-client with Apache License 2.0
@Override
public DeploymentConfig replace(DeploymentConfig item) {
    if (isCascading()) {
        return cascading(false).replace(item);
    }
    return super.replace(item);
}
Example #27
Source File: DeploymentConfigIT.java From kubernetes-client with Apache License 2.0
@After
public void cleanup() throws InterruptedException {
    client.deploymentConfigs().inNamespace(currentNamespace).delete();

    DeleteEntity<DeploymentConfig> deploymentConfigDelete = new DeleteEntity<>(DeploymentConfig.class, client, "deploymentconfig1", currentNamespace);
    await().atMost(90, TimeUnit.SECONDS).until(deploymentConfigDelete);
}
Example #28
Source File: IntegrationDeploymentHandlerTest.java From syndesis with Apache License 2.0
@Test
public void shouldSetVersionTo1ForInitialUpdate() {
    final SecurityContext security = mock(SecurityContext.class);
    final Principal principal = mock(Principal.class);
    when(security.getUserPrincipal()).thenReturn(principal);
    when(principal.getName()).thenReturn("user");

    final Integration integration = new Integration.Builder().build();
    when(dataManager.fetch(Integration.class, INTEGRATION_ID)).thenReturn(integration);

    Map<String, String> labels = new HashMap<>();
    List<DeploymentConfig> emptyList = new ArrayList<>();
    when(openShiftService.getDeploymentsByLabel(labels)).thenReturn(emptyList);

    when(dataManager.fetchAllByPropertyValue(IntegrationDeployment.class, "integrationId", INTEGRATION_ID))
        .thenReturn(Stream.empty());

    handler.update(security, INTEGRATION_ID);

    final IntegrationDeployment expected = new IntegrationDeployment.Builder().id(compositeId(INTEGRATION_ID, 1))
        .spec(integration).version(1).userId("user").build();
    verify(dataManager).create(argThat(new ArgumentMatcher<IntegrationDeployment>() {
        @Override
        public boolean matches(IntegrationDeployment given) {
            return expected.builder()
                .createdAt(given.getCreatedAt()) // ignore created at and
                .updatedAt(given.getUpdatedAt()) // updated at
                .build()
                .equals(given);
        }
    }));
}
Example #29
Source File: TestSupportHandler.java From syndesis with Apache License 2.0
@GET @Path("/delete-deployments") public void deleteDeployments() { final String user = context.getRemoteUser(); LOG.warn("user {} is deleting all integration deploymets", user); final List<DeploymentConfig> integrationDeployments = openShiftService.getDeploymentsByLabel(Collections.singletonMap(OpenShiftService.INTEGRATION_ID_LABEL, null)); for (DeploymentConfig integrationDeployment : integrationDeployments) { final String integrationDeploymentName = integrationDeployment.getMetadata().getName().replaceFirst("^i-", ""); LOG.debug("Deleting integration \"{}\"", integrationDeploymentName); openShiftService.delete(integrationDeploymentName); } LOG.warn("user {} deleted all integration deploymets", user); }
Example #30
Source File: OpenShiftServiceImplTest.java From syndesis with Apache License 2.0
void expectDeploymentOf(final String name, final DeploymentConfig expectedDeploymentConfig) {
    final DeploymentConfig deployed = new DeploymentConfigBuilder(expectedDeploymentConfig)
        .withNewStatus()
        .withLatestVersion(1L)
        .endStatus()
        .build();

    server.expect()
        .get()
        .withPath("/apis/apps.openshift.io/v1/namespaces/test/deploymentconfigs/" + openshiftName(name))
        .andReturn(404, new StatusBuilder().withCode(404).build())
        .times(1);

    server.expect()
        .get()
        .withPath("/apis/apps.openshift.io/v1/namespaces/test/deploymentconfigs/" + openshiftName(name))
        .andReturn(200, deployed)
        .always();

    server.expect()
        .patch()
        .withPath("/apis/apps.openshift.io/v1/namespaces/test/deploymentconfigs/" + openshiftName(name))
        .andReturn(200, deployed)
        .always();

    server.expect()
        .post()
        .withPath("/apis/apps.openshift.io/v1/namespaces/test/deploymentconfigs")
        .andReturn(200, expectedDeploymentConfig)
        .always();
}