From fbfbee4d0f94f844094a4c496c756d249f282119 Mon Sep 17 00:00:00 2001 From: Lukas Kral Date: Wed, 15 Apr 2026 16:34:07 +0200 Subject: [PATCH 1/4] replace test-clients' builders from the operators repository with the ones from test-clients repository Signed-off-by: Lukas Kral --- pom.xml | 7 + systemtest/pom.xml | 4 + .../kafkaclients/ClientsAuthentication.java | 123 ++++ .../internalClients/BaseClients.java | 55 -- .../internalClients/BridgeClients.java | 245 -------- .../internalClients/BridgeTracingClients.java | 105 ---- .../internalClients/KafkaClients.java | 541 ------------------ .../internalClients/KafkaOauthClients.java | 296 ---------- .../internalClients/KafkaTracingClients.java | 215 ------- .../systemtest/storage/TestStorage.java | 5 - .../systemtest/utils/AdminClientUtils.java | 6 +- .../strimzi/systemtest/utils/ClientUtils.java | 372 ------------ .../systemtest/bridge/HttpBridgeST.java | 62 +- .../bridge/HttpBridgeScramShaST.java | 56 +- .../bridge/HttpBridgeServerTlsST.java | 89 +-- .../systemtest/bridge/HttpBridgeTlsST.java | 77 ++- .../systemtest/connect/ConnectBuilderST.java | 33 +- .../strimzi/systemtest/connect/ConnectST.java | 183 ++++-- .../CruiseControlConfigurationST.java | 17 +- .../cruisecontrol/CruiseControlST.java | 39 +- .../systemtest/kafka/ConfigProviderST.java | 18 +- .../systemtest/kafka/KafkaNodePoolST.java | 21 +- .../io/strimzi/systemtest/kafka/KafkaST.java | 104 +++- .../systemtest/kafka/KafkaVersionsST.java | 53 +- .../io/strimzi/systemtest/kafka/QuotasST.java | 42 +- .../systemtest/kafka/TieredStorageST.java | 58 +- .../kafka/listeners/ListenersST.java | 458 +++++++++------ .../kafka/listeners/MultipleListenersST.java | 24 +- .../strimzi/systemtest/metrics/MetricsST.java | 36 +- .../metrics/StrimziMetricsReporterST.java | 18 +- .../mirrormaker/MirrorMaker2ST.java | 406 ++++++++----- .../systemtest/operators/FeatureGatesST.java | 18 +- .../operators/MultipleClusterOperatorsST.java | 16 +- .../NamespaceDeletionRecoveryST.java | 
60 +- .../systemtest/operators/PodSetST.java | 21 +- .../systemtest/operators/RecoveryST.java | 22 +- .../topic/TopicReplicasChangeST.java | 27 +- .../systemtest/operators/topic/TopicST.java | 78 ++- .../systemtest/operators/user/UserST.java | 129 +++-- .../AlternativeReconcileTriggersST.java | 141 +++-- .../rollingupdate/KafkaRollerST.java | 30 +- .../rollingupdate/RollingUpdateST.java | 88 ++- .../security/NetworkPoliciesST.java | 50 +- .../security/PodSecurityProfilesST.java | 73 ++- .../systemtest/security/SecurityST.java | 209 +++++-- .../security/custom/CustomAuthorizerST.java | 54 +- .../security/custom/CustomCaChainST.java | 136 +++-- .../security/custom/CustomCaST.java | 51 +- .../security/oauth/OauthAuthorizationST.java | 134 ++--- .../security/oauth/OauthPlainST.java | 151 ++--- .../systemtest/specific/AccessOperatorST.java | 14 +- .../systemtest/specific/DrainCleanerST.java | 20 +- .../systemtest/specific/RackAwarenessST.java | 69 ++- .../systemtest/tracing/OpenTelemetryST.java | 212 +++++-- .../upgrade/AbstractKRaftUpgradeST.java | 48 +- .../upgrade/KRaftKafkaUpgradeDowngradeST.java | 16 +- .../systemtest/upgrade/KRaftOlmUpgradeST.java | 12 +- .../watcher/AbstractNamespaceST.java | 40 +- 58 files changed, 2661 insertions(+), 3026 deletions(-) create mode 100644 systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/ClientsAuthentication.java delete mode 100644 systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/BaseClients.java delete mode 100644 systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/BridgeClients.java delete mode 100644 systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/BridgeTracingClients.java delete mode 100644 systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/KafkaClients.java delete mode 100644 systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/KafkaOauthClients.java delete mode 100644 
systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/KafkaTracingClients.java diff --git a/pom.xml b/pom.xml index 86071c216fc..61b470ab444 100644 --- a/pom.xml +++ b/pom.xml @@ -121,6 +121,7 @@ 0.6.0 0.0.15 0.2.0 + 0.13.0 false ${skipTests} @@ -793,6 +794,12 @@ api ${access-operator.version} + + io.strimzi.test-clients + builders + 0.14.0-SNAPSHOT + provided + diff --git a/systemtest/pom.xml b/systemtest/pom.xml index f5c0200c2ce..c060f042943 100644 --- a/systemtest/pom.xml +++ b/systemtest/pom.xml @@ -261,6 +261,10 @@ com.marcnuri.helm-java helm-java + + io.strimzi.test-clients + builders + diff --git a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/ClientsAuthentication.java b/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/ClientsAuthentication.java new file mode 100644 index 00000000000..e600cce7959 --- /dev/null +++ b/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/ClientsAuthentication.java @@ -0,0 +1,123 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.systemtest.kafkaclients; + +import io.fabric8.kubernetes.api.model.EnvVar; +import io.fabric8.kubernetes.api.model.EnvVarBuilder; +import io.skodjob.kubetest4j.resources.KubeResourceManager; +import io.strimzi.api.kafka.model.kafka.KafkaResources; +import io.strimzi.operator.common.Util; +import io.strimzi.systemtest.keycloak.KeycloakInstance; +import io.strimzi.testclients.configuration.Authentication; +import io.strimzi.testclients.configuration.AuthenticationBuilder; +import org.apache.kafka.common.security.auth.SecurityProtocol; + +import java.util.ArrayList; +import java.util.List; + +public class ClientsAuthentication { + public static Authentication configureTlsScramSha(String namespaceName, String userName, String clusterName) { + return configureScramSha(namespaceName, userName, SecurityProtocol.SASL_SSL) + .withNewSsl() + .withSslTruststoreCertificate(KafkaResources.clusterCaCertificateSecretName(clusterName)) + .endSsl() + .build(); + } + + public static Authentication configurePlainScramSha(String namespaceName, String userName) { + return configureScramSha(namespaceName, userName, SecurityProtocol.SASL_PLAINTEXT).build(); + } + + public static AuthenticationBuilder configureScramSha(String namespaceName, String userName, SecurityProtocol securityProtocol) { + final String saslJaasConfigEncrypted = KubeResourceManager.get().kubeClient().getClient().secrets().inNamespace(namespaceName).withName(userName).get().getData().get("sasl.jaas.config"); + final String saslJaasConfigDecrypted = Util.decodeFromBase64(saslJaasConfigEncrypted); + + return new AuthenticationBuilder() + .withNewSasl() + .withSaslJaasConfig(saslJaasConfigDecrypted) + .withSaslMechanism("SCRAM-SHA-512") + .endSasl() + .withSecurityProtocol(securityProtocol.toString()); + } + + public static Authentication configureTls(String clusterName, String userName) { + return configureTls(KafkaResources.clusterCaCertificateSecretName(clusterName), userName, userName); + } + 
+ public static Authentication configureTlsCustomCerts(String caCertificateSecretName, String keystoreSecretName) { + return configureTls(caCertificateSecretName, keystoreSecretName, keystoreSecretName); + } + + private static Authentication configureTls(String caCertificateSecretName, String keystoreKeySecretName, String keystoreCertificateChainSecretName) { + return new AuthenticationBuilder() + .withNewSsl() + .withSslTruststoreCertificate(caCertificateSecretName) + .withSslKeystoreKey(keystoreKeySecretName) + .withSslKeystoreCertificateChain(keystoreCertificateChainSecretName) + .endSsl() + .withNewSasl() + .withSaslMechanism("GSSAPI") + .endSasl() + .withSecurityProtocol(SecurityProtocol.SSL.toString()) + .build(); + } + + public static Authentication configureTlsOAuth(String clusterName, String oauthClientId, String oauthClientSecret, String oauthTokenEndpointUri) { + EnvVar oauthSslEndpointEnvVar = new EnvVarBuilder() + .withName("OAUTH_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM") + .withValue("") + .build(); + + return configureOAuth(oauthClientId, oauthClientSecret, oauthTokenEndpointUri, List.of(oauthSslEndpointEnvVar)) + .withNewSsl() + .withSslTruststoreCertificate(KafkaResources.clusterCaCertificateSecretName(clusterName)) + .withSslKeystoreKey(oauthClientId) + .withSslKeystoreCertificateChain(oauthClientId) + .endSsl() + .build(); + } + + public static Authentication configureOAuthPlain(String oauthClientId, String oauthClientSecret, String oauthTokenEndpointUri) { + return configureOAuth(oauthClientId, oauthClientSecret, oauthTokenEndpointUri, null).build(); + } + + private static AuthenticationBuilder configureOAuth(String oauthClientId, String oauthClientSecret, String oauthTokenEndpointUri, List additionalEnvVars) { + List envVars = new ArrayList<>(List.of( + new EnvVarBuilder() + .withName("OAUTH_SSL_TRUSTSTORE_CERTIFICATES") + .withNewValueFrom() + .withNewSecretKeyRef() + .withName(KeycloakInstance.KEYCLOAK_SECRET_NAME) + 
.withKey(KeycloakInstance.KEYCLOAK_SECRET_CERT) + .endSecretKeyRef() + .endValueFrom() + .build(), + new EnvVarBuilder() + .withName("OAUTH_SSL_TRUSTSTORE_TYPE") + .withValue("PEM") + .build(), + new EnvVarBuilder() + .withName("OAUTH_CLIENT_SECRET") + .editOrNewValueFrom() + .withNewSecretKeyRef() + .withName(oauthClientSecret) + .withKey("clientSecret") + .endSecretKeyRef() + .endValueFrom() + .build() + )); + + if (additionalEnvVars != null) { + envVars.addAll(additionalEnvVars); + } + + return new AuthenticationBuilder() + .withNewOauth() + .withOauthClientId(oauthClientId) + .withOauthTokenEndpointUri(oauthTokenEndpointUri) + .withAdditionalOAuthEnvVars(envVars) + .endOauth(); + } +} diff --git a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/BaseClients.java b/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/BaseClients.java deleted file mode 100644 index d45435fceb4..00000000000 --- a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/BaseClients.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.systemtest.kafkaclients.internalClients; - -import io.sundr.builder.annotations.Buildable; - -import java.security.InvalidParameterException; - -@Buildable(editableEnabled = false) -public abstract class BaseClients { - private String bootstrapAddress; - private String topicName; - private String additionalConfig; - private String namespaceName; - - public String getBootstrapAddress() { - return bootstrapAddress; - } - - public void setBootstrapAddress(String bootstrapAddress) { - if (bootstrapAddress == null || bootstrapAddress.isEmpty()) { - throw new InvalidParameterException("Bootstrap server is not set."); - } - this.bootstrapAddress = bootstrapAddress; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - if (topicName == null || topicName.isEmpty()) { - throw new InvalidParameterException("Topic name is not set."); - } - this.topicName = topicName; - } - - public String getNamespaceName() { - return namespaceName; - } - - public void setNamespaceName(String namespaceName) { - this.namespaceName = namespaceName; - } - - public String getAdditionalConfig() { - return additionalConfig; - } - - public void setAdditionalConfig(String additionalConfig) { - this.additionalConfig = (additionalConfig == null || additionalConfig.isEmpty()) ? "" : additionalConfig; - } -} diff --git a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/BridgeClients.java b/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/BridgeClients.java deleted file mode 100644 index e1c7a2355cf..00000000000 --- a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/BridgeClients.java +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.systemtest.kafkaclients.internalClients; - -import io.fabric8.kubernetes.api.model.EnvVar; -import io.fabric8.kubernetes.api.model.EnvVarBuilder; -import io.fabric8.kubernetes.api.model.LabelSelector; -import io.fabric8.kubernetes.api.model.LabelSelectorBuilder; -import io.fabric8.kubernetes.api.model.LocalObjectReference; -import io.fabric8.kubernetes.api.model.PodSpecBuilder; -import io.fabric8.kubernetes.api.model.batch.v1.Job; -import io.fabric8.kubernetes.api.model.batch.v1.JobBuilder; -import io.strimzi.api.kafka.model.bridge.KafkaBridgeResources; -import io.strimzi.systemtest.Environment; -import io.strimzi.systemtest.TestConstants; -import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; -import io.sundr.builder.annotations.Buildable; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -@Buildable(editableEnabled = false) -public class BridgeClients extends KafkaClients { - private String componentName; - private int pollInterval; - private int port; - - public String getComponentName() { - return componentName; - } - - public void setComponentName(String componentName) { - this.componentName = componentName; - } - - public int getPollInterval() { - return pollInterval; - } - - public void setPollInterval(int pollInterval) { - this.pollInterval = pollInterval; - } - - public int getPort() { - return port; - } - - public void setPort(int port) { - this.port = port; - } - - private void createNetworkPoliciesIfNeeded(String clientName, Map clientLabels) { - // We need to create network policies if default policy is to deny traffic - if (Environment.DEFAULT_TO_DENY_NETWORK_POLICIES) { - LabelSelector producerLabelSelector = new LabelSelectorBuilder() - .addToMatchLabels(clientLabels) - .build(); - - NetworkPolicyUtils.allowNetworkPolicySettingsForBridgeClients(this.getNamespaceName(), clientName, producerLabelSelector, this.getComponentName()); - } - } - - public 
JobBuilder defaultProducerStrimziBridge() { - Map producerLabels = new HashMap<>(); - producerLabels.put("app", this.getProducerName()); - producerLabels.put(TestConstants.KAFKA_CLIENTS_LABEL_KEY, TestConstants.KAFKA_BRIDGE_CLIENTS_LABEL_VALUE); - - PodSpecBuilder podSpecBuilder = new PodSpecBuilder(); - - if (Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET != null && !Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET.isEmpty()) { - List imagePullSecrets = Collections.singletonList(new LocalObjectReference(Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET)); - podSpecBuilder.withImagePullSecrets(imagePullSecrets); - } - - createNetworkPoliciesIfNeeded(this.getProducerName(), producerLabels); - - return new JobBuilder() - .withNewMetadata() - .withNamespace(this.getNamespaceName()) - .withLabels(producerLabels) - .withName(this.getProducerName()) - .endMetadata() - .withNewSpec() - .withBackoffLimit(0) - .withNewTemplate() - .withNewMetadata() - .withLabels(producerLabels) - .endMetadata() - .withNewSpecLike(podSpecBuilder.build()) - .withRestartPolicy("Never") - .addNewContainer() - .withName(this.getProducerName()) - .withImagePullPolicy(TestConstants.IF_NOT_PRESENT_IMAGE_PULL_POLICY) - .withImage(Environment.TEST_CLIENTS_IMAGE) - .addNewEnv() - .withName("HOSTNAME") - .withValue(this.getBootstrapAddress()) - .endEnv() - .addNewEnv() - .withName("PORT") - .withValue(Integer.toString(port)) - .endEnv() - .addNewEnv() - .withName("TOPIC") - .withValue(this.getTopicName()) - .endEnv() - .addNewEnv() - .withName("DELAY_MS") - .withValue(String.valueOf(this.getDelayMs())) - .endEnv() - .addNewEnv() - .withName("MESSAGE_COUNT") - .withValue(Integer.toString(this.getMessageCount())) - .endEnv() - .addNewEnv() - .withName("CLIENT_TYPE") - .withValue("HttpProducer") - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec(); - } - - - public Job producerStrimziBridge() { - return this.defaultProducerStrimziBridge().build(); - } - - public Job 
producerTlsStrimziBridge(final String clusterName) { - return defaultProducerStrimziBridge() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addToEnv(this.getCaCertEnv(clusterName)) - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - public JobBuilder defaultConsumerStrimziBridge() { - Map consumerLabels = new HashMap<>(); - consumerLabels.put("app", this.getConsumerName()); - consumerLabels.put(TestConstants.KAFKA_CLIENTS_LABEL_KEY, TestConstants.KAFKA_BRIDGE_CLIENTS_LABEL_VALUE); - - PodSpecBuilder podSpecBuilder = new PodSpecBuilder(); - - if (Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET != null && !Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET.isEmpty()) { - List imagePullSecrets = Collections.singletonList(new LocalObjectReference(Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET)); - podSpecBuilder.withImagePullSecrets(imagePullSecrets); - } - - createNetworkPoliciesIfNeeded(this.getConsumerName(), consumerLabels); - - return new JobBuilder() - .withNewMetadata() - .withNamespace(this.getNamespaceName()) - .withLabels(consumerLabels) - .withName(this.getConsumerName()) - .endMetadata() - .withNewSpec() - .withBackoffLimit(0) - .withNewTemplate() - .withNewMetadata() - .withLabels(consumerLabels) - .endMetadata() - .withNewSpecLike(podSpecBuilder.build()) - .withRestartPolicy("Never") - .addNewContainer() - .withName(this.getConsumerName()) - .withImagePullPolicy(TestConstants.IF_NOT_PRESENT_IMAGE_PULL_POLICY) - .withImage(Environment.TEST_CLIENTS_IMAGE) - .addNewEnv() - .withName("HOSTNAME") - .withValue(this.getBootstrapAddress()) - .endEnv() - .addNewEnv() - .withName("PORT") - .withValue(Integer.toString(port)) - .endEnv() - .addNewEnv() - .withName("TOPIC") - .withValue(this.getTopicName()) - .endEnv() - .addNewEnv() - .withName("POLL_INTERVAL") - .withValue(Integer.toString(pollInterval)) - .endEnv() - .addNewEnv() - .withName("MESSAGE_COUNT") - 
.withValue(Integer.toString(this.getMessageCount())) - .endEnv() - .addNewEnv() - .withName("GROUP_ID") - .withValue(this.getConsumerGroup()) - .endEnv() - .addNewEnv() - .withName("CLIENT_TYPE") - .withValue("HttpConsumer") - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec(); - } - public Job consumerStrimziBridge() { - return this.defaultConsumerStrimziBridge().build(); - } - - public Job consumerTlsStrimziBridge(final String clusterName) { - return defaultConsumerStrimziBridge() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addToEnv(this.getCaCertEnv(clusterName)) - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - protected EnvVar getCaCertEnv(String clusterName) { - final String caSecretName = this.getCaCertSecretName() == null || this.getCaCertSecretName().isEmpty() ? - KafkaBridgeResources.serviceName(clusterName) : this.getCaCertSecretName(); - - return new EnvVarBuilder() - .withName("CA_CRT") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(caSecretName) - .withKey("ca.crt") - .endSecretKeyRef() - .endValueFrom() - .build(); - } -} diff --git a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/BridgeTracingClients.java b/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/BridgeTracingClients.java deleted file mode 100644 index 3b1029b7e99..00000000000 --- a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/BridgeTracingClients.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.systemtest.kafkaclients.internalClients; - -import io.fabric8.kubernetes.api.model.batch.v1.Job; -import io.strimzi.systemtest.tracing.TracingConstants; -import io.sundr.builder.annotations.Buildable; - -@Buildable(editableEnabled = false) -public class BridgeTracingClients extends BridgeClients { - - private String tracingServiceNameEnvVar; - private boolean openTelemetry = false; - private String tracingType; - - public String getTracingServiceNameEnvVar() { - - return tracingServiceNameEnvVar; - } - - public void setTracingServiceNameEnvVar(String tracingServiceNameEnvVar) { - this.tracingServiceNameEnvVar = tracingServiceNameEnvVar; - } - - private String serviceNameEnvVar() { - // use tracing service name env var if set, else use "dummy" - return tracingServiceNameEnvVar != null ? tracingServiceNameEnvVar : "TEST_SERVICE_NAME"; - } - - public boolean isOpenTelemetry() { - return openTelemetry; - } - - public void setOpenTelemetry(boolean openTelemetry) { - this.openTelemetry = openTelemetry; - } - - public void setTracingType(String tracingType) { - // if `withOpenTelemetry` or `withOpenTracing` is used, this is the only way how to set it also as the tracingType - // to remove need of extra check in each client's method - if (this.openTelemetry) { - this.tracingType = TracingConstants.OPEN_TELEMETRY; - } else { - this.tracingType = tracingType; - } - } - - public String getTracingType() { - return tracingType; - } - - public Job producerStrimziBridgeWithTracing() { - return this.defaultProducerStrimziBridge() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addNewEnv() - .withName(this.serviceNameEnvVar()) - .withValue(this.getProducerName()) - .endEnv() - // this will only get used if tracing is enabled -- see serviceNameEnvVar() - .addNewEnv() - .withName("OTEL_EXPORTER_OTLP_ENDPOINT") - .withValue(TracingConstants.JAEGER_COLLECTOR_OTLP_URL) - .endEnv() - .addNewEnv() - .withName("TRACING_TYPE") - 
.withValue(this.tracingType) - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - public Job consumerStrimziBridgeWithTracing() { - return this.defaultConsumerStrimziBridge() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addNewEnv() - .withName(this.serviceNameEnvVar()) - .withValue(this.getConsumerName()) - .endEnv() - // this will only get used if tracing is enabled -- see serviceNameEnvVar() - .addNewEnv() - .withName("OTEL_EXPORTER_OTLP_ENDPOINT") - .withValue(TracingConstants.JAEGER_COLLECTOR_OTLP_URL) - .endEnv() - .addNewEnv() - .withName("TRACING_TYPE") - .withValue(this.tracingType) - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } -} diff --git a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/KafkaClients.java b/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/KafkaClients.java deleted file mode 100644 index ae282c3dd7d..00000000000 --- a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/KafkaClients.java +++ /dev/null @@ -1,541 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.systemtest.kafkaclients.internalClients; - -import io.fabric8.kubernetes.api.model.EnvVar; -import io.fabric8.kubernetes.api.model.EnvVarBuilder; -import io.fabric8.kubernetes.api.model.LocalObjectReference; -import io.fabric8.kubernetes.api.model.PodSpecBuilder; -import io.fabric8.kubernetes.api.model.batch.v1.Job; -import io.fabric8.kubernetes.api.model.batch.v1.JobBuilder; -import io.skodjob.kubetest4j.resources.KubeResourceManager; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.operator.common.Util; -import io.strimzi.systemtest.Environment; -import io.strimzi.systemtest.TestConstants; -import io.strimzi.systemtest.enums.PodSecurityProfile; -import io.strimzi.systemtest.utils.ClientUtils; -import io.sundr.builder.annotations.Buildable; -import org.apache.kafka.common.security.auth.SecurityProtocol; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.security.InvalidParameterException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -@Buildable(editableEnabled = false) -public class KafkaClients extends BaseClients { - private static final Logger LOGGER = LogManager.getLogger(KafkaClients.class); - - private String producerName; - private String consumerName; - private String message; - private int messageCount; - private String consumerGroup; - private long delayMs; - private String username; - private String caCertSecretName; - private String headers; - private PodSecurityProfile podSecurityPolicy; - private String messagesPerTransaction; - - public String getProducerName() { - return producerName; - } - - public void setProducerName(String producerName) { - this.producerName = producerName; - } - - public String getConsumerName() { - return consumerName; - } - - public void setConsumerName(String consumerName) { - this.consumerName = consumerName; - } - - public String 
getMessage() { - return message; - } - - public void setMessage(String message) { - if (message == null || message.isEmpty()) { - message = "Hello-world"; - } - this.message = message; - } - - public int getMessageCount() { - return messageCount; - } - - public void setMessageCount(int messageCount) { - if (messageCount <= 0) { - throw new InvalidParameterException("Message count is less than 1"); - } - this.messageCount = messageCount; - } - - public String getConsumerGroup() { - return consumerGroup; - } - - public void generateNewConsumerGroup() { - final String newConsumerGroup = ClientUtils.generateRandomConsumerGroup(); - LOGGER.info("Regenerating new consumer group {} for clients {} {}", newConsumerGroup, this.getProducerName(), this.getConsumerName()); - this.setConsumerGroup(newConsumerGroup); - } - - public void setConsumerGroup(String consumerGroup) { - if (consumerGroup == null || consumerGroup.isEmpty()) { - LOGGER.info("Consumer group were not specified going to create the random one"); - consumerGroup = ClientUtils.generateRandomConsumerGroup(); - } - this.consumerGroup = consumerGroup; - } - - public long getDelayMs() { - return delayMs; - } - - public void setDelayMs(long delayMs) { - this.delayMs = delayMs; - } - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public String getCaCertSecretName() { - return caCertSecretName; - } - - public void setCaCertSecretName(String caCertSecretName) { - this.caCertSecretName = caCertSecretName; - } - - public String getHeaders() { - return headers; - } - - public void setHeaders(String headers) { - this.headers = headers; - } - - public PodSecurityProfile getPodSecurityPolicy() { - return this.podSecurityPolicy; - } - public void setPodSecurityPolicy(final PodSecurityProfile podSecurityPolicy) { - this.podSecurityPolicy = podSecurityPolicy; - } - - public void setMessagesPerTransaction(String messagesPerTransaction) { - 
this.messagesPerTransaction = messagesPerTransaction; - } - - public String getMessagesPerTransaction() { - return messagesPerTransaction; - } - - public Job producerStrimzi() { - return defaultProducerStrimzi().build(); - } - - public Job producerScramShaPlainStrimzi() { - this.configureScramSha(SecurityProtocol.SASL_PLAINTEXT); - return defaultProducerStrimzi().build(); - } - - public Job producerScramShaTlsStrimzi(final String clusterName) { - this.configureScramSha(SecurityProtocol.SASL_SSL); - - return defaultProducerStrimzi() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addToEnv(this.getClusterCaCertEnv(clusterName)) - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - public Job producerTlsStrimzi(final String clusterName) { - List tlsEnvVars = new ArrayList<>(); - tlsEnvVars.add(this.getClusterCaCertEnv(clusterName)); - tlsEnvVars.addAll(this.getTlsEnvVars()); - - return producerTlsStrimziWithTlsEnvVars(tlsEnvVars); - } - - public Job producerTlsStrimziWithTlsEnvVars(List tlsEnvVars) { - this.configureTls(); - - return defaultProducerStrimzi() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addAllToEnv(tlsEnvVars) - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - public JobBuilder defaultProducerStrimzi() { - if (producerName == null || producerName.isEmpty()) { - throw new InvalidParameterException("Producer name is not set."); - } - - Map producerLabels = new HashMap<>(); - producerLabels.put("app", producerName); - producerLabels.put(TestConstants.KAFKA_CLIENTS_LABEL_KEY, TestConstants.KAFKA_CLIENTS_LABEL_VALUE); - - PodSpecBuilder podSpecBuilder = new PodSpecBuilder(); - - if (Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET != null && !Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET.isEmpty()) { - List imagePullSecrets = Collections.singletonList(new LocalObjectReference(Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET)); - 
podSpecBuilder.withImagePullSecrets(imagePullSecrets); - } - - final JobBuilder builder = new JobBuilder() - .withNewMetadata() - .withNamespace(this.getNamespaceName()) - .withLabels(producerLabels) - .withName(producerName) - .endMetadata() - .withNewSpec() - .withBackoffLimit(0) - .withNewTemplate() - .withNewMetadata() - .withName(producerName) - .withNamespace(this.getNamespaceName()) - .withLabels(producerLabels) - .endMetadata() - .withNewSpecLike(podSpecBuilder.build()) - .withRestartPolicy("Never") - .withContainers() - .addNewContainer() - .withName(producerName) - .withImagePullPolicy(TestConstants.IF_NOT_PRESENT_IMAGE_PULL_POLICY) - .withImage(Environment.TEST_CLIENTS_IMAGE) - .addNewEnv() - .withName("BOOTSTRAP_SERVERS") - .withValue(this.getBootstrapAddress()) - .endEnv() - .addNewEnv() - .withName("TOPIC") - .withValue(this.getTopicName()) - .endEnv() - .addNewEnv() - .withName("DELAY_MS") - .withValue(String.valueOf(delayMs)) - .endEnv() - .addNewEnv() - .withName("LOG_LEVEL") - .withValue("DEBUG") - .endEnv() - .addNewEnv() - .withName("MESSAGE_COUNT") - .withValue(String.valueOf(messageCount)) - .endEnv() - .addNewEnv() - .withName("MESSAGE") - .withValue(message) - .endEnv() - .addNewEnv() - .withName("PRODUCER_ACKS") - .withValue("all") - .endEnv() - .addNewEnv() - .withName("ADDITIONAL_CONFIG") - .withValue(this.getAdditionalConfig()) - .endEnv() - .addNewEnv() - .withName("BLOCKING_PRODUCER") - .withValue("true") - .endEnv() - .addNewEnv() - .withName("CLIENT_TYPE") - .withValue("KafkaProducer") - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec(); - - if (this.getHeaders() != null) { - builder - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addNewEnv() - .withName("HEADERS") - .withValue(this.getHeaders()) - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec(); - } - - if (this.getMessagesPerTransaction() != null) { - builder - .editSpec() - .editTemplate() - .editSpec() - 
.editFirstContainer() - .addNewEnv() - .withName("MESSAGES_PER_TRANSACTION") - .withValue(this.getMessagesPerTransaction()) - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec(); - } - - if (PodSecurityProfile.RESTRICTED == this.podSecurityPolicy) { - this.enableRestrictedProfile(builder); - } - - return builder; - } - - public Job consumerScramShaPlainStrimzi() { - this.configureScramSha(SecurityProtocol.SASL_PLAINTEXT); - return defaultConsumerStrimzi().build(); - } - - public Job consumerScramShaTlsStrimzi(final String clusterName) { - this.configureScramSha(SecurityProtocol.SASL_SSL); - - return defaultConsumerStrimzi() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addToEnv(this.getClusterCaCertEnv(clusterName)) - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - public Job consumerTlsStrimzi(final String clusterName) { - List tlsEnvVars = new ArrayList<>(); - tlsEnvVars.add(this.getClusterCaCertEnv(clusterName)); - tlsEnvVars.addAll(this.getTlsEnvVars()); - - return consumerTlsStrimziWithTlsEnvVars(tlsEnvVars); - } - - public Job consumerTlsStrimziWithTlsEnvVars(final List tlsEnvVars) { - this.configureTls(); - - return defaultConsumerStrimzi() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addAllToEnv(tlsEnvVars) - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - public Job consumerStrimzi() { - return defaultConsumerStrimzi().build(); - } - - public JobBuilder defaultConsumerStrimzi() { - if (consumerName == null || consumerName.isEmpty()) { - throw new InvalidParameterException("Consumer name is not set."); - } - - Map consumerLabels = new HashMap<>(); - consumerLabels.put("app", consumerName); - consumerLabels.put(TestConstants.KAFKA_CLIENTS_LABEL_KEY, TestConstants.KAFKA_CLIENTS_LABEL_VALUE); - - PodSpecBuilder podSpecBuilder = new PodSpecBuilder(); - - if (Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET != null && 
!Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET.isEmpty()) { - List imagePullSecrets = Collections.singletonList(new LocalObjectReference(Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET)); - podSpecBuilder.withImagePullSecrets(imagePullSecrets); - } - - final JobBuilder builder = new JobBuilder() - .withNewMetadata() - .withNamespace(this.getNamespaceName()) - .withLabels(consumerLabels) - .withName(consumerName) - .endMetadata() - .withNewSpec() - .withBackoffLimit(0) - .withNewTemplate() - .withNewMetadata() - .withLabels(consumerLabels) - .withNamespace(this.getNamespaceName()) - .withName(consumerName) - .endMetadata() - .withNewSpecLike(podSpecBuilder.build()) - .withRestartPolicy("Never") - .withContainers() - .addNewContainer() - .withName(consumerName) - .withImagePullPolicy(TestConstants.IF_NOT_PRESENT_IMAGE_PULL_POLICY) - .withImage(Environment.TEST_CLIENTS_IMAGE) - .addNewEnv() - .withName("BOOTSTRAP_SERVERS") - .withValue(this.getBootstrapAddress()) - .endEnv() - .addNewEnv() - .withName("TOPIC") - .withValue(this.getTopicName()) - .endEnv() - .addNewEnv() - .withName("DELAY_MS") - .withValue(String.valueOf(delayMs)) - .endEnv() - .addNewEnv() - .withName("LOG_LEVEL") - .withValue("DEBUG") - .endEnv() - .addNewEnv() - .withName("MESSAGE_COUNT") - .withValue(String.valueOf(messageCount)) - .endEnv() - .addNewEnv() - .withName("GROUP_ID") - .withValue(consumerGroup) - .endEnv() - .addNewEnv() - .withName("ADDITIONAL_CONFIG") - .withValue(this.getAdditionalConfig()) - .endEnv() - .addNewEnv() - .withName("CLIENT_TYPE") - .withValue("KafkaConsumer") - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec(); - - if (PodSecurityProfile.RESTRICTED == this.podSecurityPolicy) { - this.enableRestrictedProfile(builder); - } - return builder; - } - - protected EnvVar getClusterCaCertEnv(String clusterName) { - final String caSecretName = this.getCaCertSecretName() == null || this.getCaCertSecretName().isEmpty() ? 
- KafkaResources.clusterCaCertificateSecretName(clusterName) : this.getCaCertSecretName(); - - return new EnvVarBuilder() - .withName("CA_CRT") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(caSecretName) - .withKey("ca.crt") - .endSecretKeyRef() - .endValueFrom() - .build(); - } - - final protected void configureScramSha(SecurityProtocol securityProtocol) { - if (this.getUsername() == null || this.getUsername().isEmpty()) { - throw new InvalidParameterException("User name for SCRAM-SHA is not set"); - } - - final String saslJaasConfigEncrypted = KubeResourceManager.get().kubeClient().getClient().secrets().inNamespace(this.getNamespaceName()).withName(this.getUsername()).get().getData().get("sasl.jaas.config"); - final String saslJaasConfigDecrypted = Util.decodeFromBase64(saslJaasConfigEncrypted); - - this.setAdditionalConfig(this.getAdditionalConfig() + - // scram-sha - "sasl.mechanism=SCRAM-SHA-512\n" + - "security.protocol=" + securityProtocol + "\n" + - "sasl.jaas.config=" + saslJaasConfigDecrypted); - } - - final protected void configureTls() { - this.setAdditionalConfig(this.getAdditionalConfig() + - "sasl.mechanism=GSSAPI\n" + - "security.protocol=" + SecurityProtocol.SSL + "\n"); - } - - protected List getTlsEnvVars() { - if (this.getUsername() == null || this.getUsername().isEmpty()) { - throw new InvalidParameterException("User name for TLS is not set"); - } - - EnvVar userCrt = new EnvVarBuilder() - .withName("USER_CRT") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(this.getUsername()) - .withKey("user.crt") - .endSecretKeyRef() - .endValueFrom() - .build(); - - EnvVar userKey = new EnvVarBuilder() - .withName("USER_KEY") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(this.getUsername()) - .withKey("user.key") - .endSecretKeyRef() - .endValueFrom() - .build(); - - return List.of(userCrt, userKey); - } - - private void enableRestrictedProfile(final JobBuilder jobBuilder) { - jobBuilder - .editSpec() - .editTemplate() 
- .editSpec() - .withNewSecurityContext() - .withRunAsNonRoot(true) - .withNewSeccompProfile() - .withType("RuntimeDefault") - .endSeccompProfile() - .endSecurityContext() - .editFirstContainer() - .withNewSecurityContext() - .withAllowPrivilegeEscalation(false) - .withNewCapabilities() - .withDrop("ALL") - .endCapabilities() - .withRunAsNonRoot(true) - .withNewSeccompProfile() - .withType("RuntimeDefault") - .endSeccompProfile() - .endSecurityContext() - .endContainer() - .endSpec() - .endTemplate() - .endSpec(); - } -} diff --git a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/KafkaOauthClients.java b/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/KafkaOauthClients.java deleted file mode 100644 index d03c2917c64..00000000000 --- a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/KafkaOauthClients.java +++ /dev/null @@ -1,296 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.systemtest.kafkaclients.internalClients; - -import io.fabric8.kubernetes.api.model.batch.v1.Job; -import io.fabric8.kubernetes.api.model.batch.v1.JobBuilder; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.systemtest.keycloak.KeycloakInstance; -import io.sundr.builder.annotations.Buildable; - -import java.security.InvalidParameterException; - -@Buildable(editableEnabled = false) -public class KafkaOauthClients extends KafkaClients { - private String oauthClientId; - private String oauthProducerClientId; - private String oauthConsumerClientId; - private String oauthClientSecret; - private String oauthProducerSecret; - private String oauthConsumerSecret; - private String oauthTokenEndpointUri; - private String clientUserName; - - public String getOauthClientId() { - return oauthClientId; - } - - public void setOauthClientId(String oauthClientId) { - this.oauthClientId = oauthClientId; - } - - public String getOauthProducerClientId() { - return oauthProducerClientId; - } - - public void setOauthProducerClientId(String oauthProducerClientId) { - this.oauthProducerClientId = oauthProducerClientId; - } - - public String getOauthConsumerClientId() { - return oauthConsumerClientId; - } - - public void setOauthConsumerClientId(String oauthConsumerClientId) { - this.oauthConsumerClientId = oauthConsumerClientId; - } - - public String getOauthProducerSecret() { - return oauthProducerSecret; - } - - public void setOauthProducerSecret(String oauthProducerSecret) { - this.oauthProducerSecret = oauthProducerSecret; - } - - public String getOauthConsumerSecret() { - return oauthConsumerSecret; - } - - public void setOauthConsumerSecret(String oauthConsumerSecret) { - this.oauthConsumerSecret = oauthConsumerSecret; - } - - public String getOauthClientSecret() { - return oauthClientSecret; - } - - public void setOauthClientSecret(String oauthClientSecret) { - this.oauthClientSecret = oauthClientSecret; - } - - public String 
getOauthTokenEndpointUri() { - return oauthTokenEndpointUri; - } - - public void setOauthTokenEndpointUri(String oauthTokenEndpointUri) { - if (oauthTokenEndpointUri == null || oauthTokenEndpointUri.isEmpty()) { - throw new InvalidParameterException("OAuth token endpoint url is not set."); - } - this.oauthTokenEndpointUri = oauthTokenEndpointUri; - } - - public String getClientUserName() { - return clientUserName; - } - - public void setClientUserName(String clientUserName) { - this.clientUserName = clientUserName; - } - - public Job producerStrimziOauthPlain() { - return defaultProducerStrimziOauthPlain().build(); - } - - private JobBuilder defaultProducerStrimziOauthPlain() { - checkParameters(); - return defaultProducerStrimzi() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addNewEnv() - .withName("OAUTH_CLIENT_ID") - .withValue(oauthProducerClientId != null ? oauthProducerClientId : oauthClientId) - .endEnv() - .addNewEnv() - .withName("OAUTH_CLIENT_SECRET") - .editOrNewValueFrom() - .withNewSecretKeyRef() - .withName(oauthProducerSecret != null ? 
oauthProducerSecret : oauthClientSecret) - .withKey("clientSecret") - .endSecretKeyRef() - .endValueFrom() - .endEnv() - .addNewEnv() - .withName("OAUTH_TOKEN_ENDPOINT_URI") - .withValue(oauthTokenEndpointUri) - .endEnv() - .addNewEnv() - .withName("OAUTH_SSL_TRUSTSTORE_CERTIFICATES") - .editOrNewValueFrom() - .withNewSecretKeyRef() - .withName(KeycloakInstance.KEYCLOAK_SECRET_NAME) - .withKey(KeycloakInstance.KEYCLOAK_SECRET_CERT) - .endSecretKeyRef() - .endValueFrom() - .endEnv() - .addNewEnv() - .withName("OAUTH_SSL_TRUSTSTORE_TYPE") - .withValue("PEM") - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec(); - } - - public Job producerStrimziOauthTls(String clusterName) { - - return defaultProducerStrimziOauthPlain() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addNewEnv() - // disable hostname verification - .withName("OAUTH_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM") - .withValue("") - .endEnv() - .addNewEnv() - .withName("CA_CRT") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(KafkaResources.clusterCaCertificateSecretName(clusterName)) - .withKey("ca.crt") - .endSecretKeyRef() - .endValueFrom() - .endEnv() - .addNewEnv() - .withName("USER_CRT") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(oauthProducerClientId != null ? oauthProducerClientId : clientUserName) - .withKey("user.crt") - .endSecretKeyRef() - .endValueFrom() - .endEnv() - .addNewEnv() - .withName("USER_KEY") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(oauthProducerClientId != null ? 
oauthProducerClientId : clientUserName) - .withKey("user.key") - .endSecretKeyRef() - .endValueFrom() - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - public Job consumerStrimziOauthPlain() { - return defaultConsumerStrimziOauth().build(); - } - - private JobBuilder defaultConsumerStrimziOauth() { - checkParameters(); - return defaultConsumerStrimzi() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addNewEnv() - .withName("OAUTH_CLIENT_ID") - .withValue(oauthConsumerClientId != null ? oauthConsumerClientId : oauthClientId) - .endEnv() - .addNewEnv() - .withName("OAUTH_CLIENT_SECRET") - .editOrNewValueFrom() - .withNewSecretKeyRef() - .withName(oauthConsumerSecret != null ? oauthConsumerSecret : oauthClientSecret) - .withKey("clientSecret") - .endSecretKeyRef() - .endValueFrom() - .endEnv() - .addNewEnv() - .withName("OAUTH_TOKEN_ENDPOINT_URI") - .withValue(oauthTokenEndpointUri) - .endEnv() - .addNewEnv() - .withName("OAUTH_SSL_TRUSTSTORE_CERTIFICATES") - .editOrNewValueFrom() - .withNewSecretKeyRef() - .withName(KeycloakInstance.KEYCLOAK_SECRET_NAME) - .withKey(KeycloakInstance.KEYCLOAK_SECRET_CERT) - .endSecretKeyRef() - .endValueFrom() - .endEnv() - .addNewEnv() - .withName("OAUTH_SSL_TRUSTSTORE_TYPE") - .withValue("PEM") - .endEnv() - .addNewEnv() - .withName("LOG_LEVEL") - .withValue("DEBUG") - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec(); - } - - public Job consumerStrimziOauthTls(String clusterName) { - - return defaultConsumerStrimziOauth() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addNewEnv() - // disable hostname verification - .withName("OAUTH_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM") - .withValue("") - .endEnv() - .addNewEnv() - .withName("CA_CRT") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(KafkaResources.clusterCaCertificateSecretName(clusterName)) - .withKey("ca.crt") - .endSecretKeyRef() - .endValueFrom() - 
.endEnv() - .addNewEnv() - .withName("USER_CRT") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(oauthConsumerClientId != null ? oauthConsumerClientId : clientUserName) - .withKey("user.crt") - .endSecretKeyRef() - .endValueFrom() - .endEnv() - .addNewEnv() - .withName("USER_KEY") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(oauthConsumerClientId != null ? oauthConsumerClientId : clientUserName) - .withKey("user.key") - .endSecretKeyRef() - .endValueFrom() - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - private void checkParameters() { - if ((this.getOauthClientId() == null || this.getOauthClientId().isEmpty()) && - (this.getOauthConsumerClientId() == null && this.getOauthProducerClientId() == null)) { - throw new InvalidParameterException("OAuth clientId is not set."); - } - if (this.getOauthClientSecret() == null || this.getOauthClientSecret().isEmpty() && - (this.getOauthProducerSecret() == null && this.getOauthConsumerSecret() == null)) { - throw new InvalidParameterException("OAuth client Secret is not set."); - } - if (this.getClientUserName() == null || this.getClientUserName().isEmpty()) { - this.setClientUserName(this.getOauthClientId()); - } - } -} diff --git a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/KafkaTracingClients.java b/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/KafkaTracingClients.java deleted file mode 100644 index 4c74743ad24..00000000000 --- a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/internalClients/KafkaTracingClients.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
- */ -package io.strimzi.systemtest.kafkaclients.internalClients; - -import io.fabric8.kubernetes.api.model.LocalObjectReference; -import io.fabric8.kubernetes.api.model.PodSpecBuilder; -import io.fabric8.kubernetes.api.model.batch.v1.Job; -import io.fabric8.kubernetes.api.model.batch.v1.JobBuilder; -import io.strimzi.systemtest.Environment; -import io.strimzi.systemtest.tracing.TracingConstants; -import io.sundr.builder.annotations.Buildable; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -@Buildable(editableEnabled = false) -public class KafkaTracingClients extends KafkaClients { - private String jaegerServiceProducerName; - private String jaegerServiceConsumerName; - private String jaegerServiceStreamsName; - private String streamsTopicTargetName; - private String tracingServiceNameEnvVar; - private boolean openTelemetry = false; - private String tracingType; - - public String getJaegerServiceConsumerName() { - return jaegerServiceConsumerName; - } - - public void setJaegerServiceConsumerName(String jaegerServiceConsumerName) { - this.jaegerServiceConsumerName = jaegerServiceConsumerName; - } - - public String getJaegerServiceProducerName() { - return jaegerServiceProducerName; - } - - public void setJaegerServiceProducerName(String jaegerServiceProducerName) { - this.jaegerServiceProducerName = jaegerServiceProducerName; - } - - public String getJaegerServiceStreamsName() { - return jaegerServiceStreamsName; - } - - public void setJaegerServiceStreamsName(String jaegerServiceStreamsName) { - this.jaegerServiceStreamsName = jaegerServiceStreamsName; - } - - public String getStreamsTopicTargetName() { - return streamsTopicTargetName; - } - - public void setStreamsTopicTargetName(String streamsTopicTargetName) { - this.streamsTopicTargetName = streamsTopicTargetName; - } - - public String getTracingServiceNameEnvVar() { - return tracingServiceNameEnvVar; - } - - public void 
setTracingServiceNameEnvVar(String tracingServiceNameEnvVar) { - this.tracingServiceNameEnvVar = tracingServiceNameEnvVar; - } - - public void setOpenTelemetry(boolean openTelemetry) { - this.openTelemetry = openTelemetry; - } - - public boolean getOpenTelemetry() { - return openTelemetry; - } - - public void setTracingType(String tracingType) { - // if `withOpenTelemetry` or `withOpenTracing` is used, this is the only way how to set it also as the tracingType - // to remove need of extra check in each client's method - if (this.openTelemetry) { - this.tracingType = TracingConstants.OPEN_TELEMETRY; - } else { - this.tracingType = tracingType; - } - } - - public String getTracingType() { - return tracingType; - } - - public Job consumerWithTracing() { - return defaultConsumerStrimzi() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addNewEnv() - .withName(this.tracingServiceNameEnvVar) - .withValue(this.jaegerServiceConsumerName) - .endEnv() - .addNewEnv() - .withName("TRACING_TYPE") - .withValue(this.tracingType) - .endEnv() - .addNewEnv() - .withName("OTEL_EXPORTER_OTLP_ENDPOINT") - .withValue(TracingConstants.JAEGER_COLLECTOR_OTLP_URL) - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - public Job producerWithTracing() { - return defaultProducerStrimzi() - .editSpec() - .editTemplate() - .editSpec() - .editFirstContainer() - .addNewEnv() - .withName(this.tracingServiceNameEnvVar) - .withValue(this.jaegerServiceProducerName) - .endEnv() - .addNewEnv() - .withName("TRACING_TYPE") - .withValue(this.tracingType) - .endEnv() - .addNewEnv() - .withName("OTEL_EXPORTER_OTLP_ENDPOINT") - .withValue(TracingConstants.JAEGER_COLLECTOR_OTLP_URL) - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - public Job kafkaStreamsWithTracing() { - String kafkaStreamsName = "hello-world-streams"; - - Map kafkaStreamLabels = new HashMap<>(); - kafkaStreamLabels.put("app", 
kafkaStreamsName); - - PodSpecBuilder podSpecBuilder = new PodSpecBuilder(); - - if (Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET != null && !Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET.isEmpty()) { - List imagePullSecrets = Collections.singletonList(new LocalObjectReference(Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET)); - podSpecBuilder.withImagePullSecrets(imagePullSecrets); - } - - return new JobBuilder() - .withNewMetadata() - .withNamespace(getNamespaceName()) - .withLabels(kafkaStreamLabels) - .withName(kafkaStreamsName) - .endMetadata() - .withNewSpec() - .withBackoffLimit(0) - .withNewTemplate() - .withNewMetadata() - .withLabels(kafkaStreamLabels) - .endMetadata() - .withNewSpecLike(podSpecBuilder.build()) - .withRestartPolicy("Never") - .withContainers() - .addNewContainer() - .withName(kafkaStreamsName) - .withImage(Environment.TEST_CLIENTS_IMAGE) - .addNewEnv() - .withName("BOOTSTRAP_SERVERS") - .withValue(this.getBootstrapAddress()) - .endEnv() - .addNewEnv() - .withName("APPLICATION_ID") - .withValue(kafkaStreamsName) - .endEnv() - .addNewEnv() - .withName("SOURCE_TOPIC") - .withValue(this.getTopicName()) - .endEnv() - .addNewEnv() - .withName("TARGET_TOPIC") - .withValue(this.streamsTopicTargetName) - .endEnv() - .addNewEnv() - .withName("LOG_LEVEL") - .withValue("DEBUG") - .endEnv() - .addNewEnv() - .withName(this.tracingServiceNameEnvVar) - .withValue(this.jaegerServiceStreamsName) - .endEnv() - .addNewEnv() - .withName("OTEL_EXPORTER_OTLP_ENDPOINT") - .withValue(TracingConstants.JAEGER_COLLECTOR_OTLP_URL) - .endEnv() - .addNewEnv() - .withName("TRACING_TYPE") - .withValue(this.tracingType) - .endEnv() - .addNewEnv() - .withName("CLIENT_TYPE") - .withValue("KafkaStreams") - .endEnv() - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } -} diff --git a/systemtest/src/main/java/io/strimzi/systemtest/storage/TestStorage.java b/systemtest/src/main/java/io/strimzi/systemtest/storage/TestStorage.java index 
95b9f3830b6..b22a81dad84 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/storage/TestStorage.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/storage/TestStorage.java @@ -12,7 +12,6 @@ import io.strimzi.api.kafka.model.nodepool.ProcessRoles; import io.strimzi.systemtest.Environment; import io.strimzi.systemtest.TestConstants; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaTracingClients; import io.strimzi.systemtest.labels.LabelSelectors; import io.strimzi.systemtest.resources.crd.KafkaComponents; import io.strimzi.systemtest.utils.StUtils; @@ -382,10 +381,6 @@ public long getTestExecutionTimeInSeconds() { return Duration.ofMillis(System.currentTimeMillis() - getTestExecutionStartTime()).getSeconds(); } - public KafkaTracingClients getTracingClients() { - return (KafkaTracingClients) extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get(TestConstants.KAFKA_TRACING_CLIENT_KEY); - } - public String getScraperPodName() { return extensionContext.getStore(ExtensionContext.Namespace.GLOBAL).get(TestConstants.SCRAPER_POD_KEY).toString(); } diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/AdminClientUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/AdminClientUtils.java index d4ef094fc12..92cef6d087a 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/utils/AdminClientUtils.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/AdminClientUtils.java @@ -11,7 +11,6 @@ import io.fabric8.kubernetes.api.model.LabelSelectorBuilder; import io.skodjob.kubetest4j.resources.KubeResourceManager; import io.strimzi.systemtest.TestConstants; -import io.strimzi.systemtest.enums.DeploymentTypes; import io.strimzi.systemtest.kafkaclients.internalClients.admin.AdminClient; import io.strimzi.test.TestUtils; import org.apache.logging.log4j.LogManager; @@ -95,10 +94,7 @@ public static AdminClient getConfiguredAdminClient(String namespaceName, String */ private static LabelSelector 
getLabelSelector(String adminName) { Map matchLabels = new HashMap<>(); - matchLabels.put(TestConstants.APP_POD_LABEL, TestConstants.ADMIN_CLIENT_NAME); - matchLabels.put(TestConstants.KAFKA_ADMIN_CLIENT_LABEL_KEY, TestConstants.KAFKA_ADMIN_CLIENT_LABEL_VALUE); - matchLabels.put(TestConstants.DEPLOYMENT_TYPE, DeploymentTypes.AdminClient.name()); - matchLabels.put(TestConstants.APP_CONTROLLER_LABEL, adminName); + matchLabels.put(TestConstants.APP_POD_LABEL, adminName); return new LabelSelectorBuilder() .withMatchLabels(matchLabels) diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/ClientUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/ClientUtils.java index 3d8d90abfe3..53ca81b02cf 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/utils/ClientUtils.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/ClientUtils.java @@ -4,11 +4,7 @@ */ package io.strimzi.systemtest.utils; -import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.systemtest.TestConstants; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; -import io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.utils.kubeUtils.controllers.JobUtils; import io.strimzi.test.TestUtils; import io.strimzi.test.WaitException; @@ -31,53 +27,6 @@ public class ClientUtils { // ensuring that object can not be created outside of class private ClientUtils() {} - /** - * Waits for both the instant producer and consumer clients to succeed, automatically deleting the associated jobs afterward. - * {@link TestStorage#getProducerName()} is used for identifying producer Job and {@link TestStorage#getConsumerName()} - * for identifying consumer Job. - * - * @param testStorage The {@link TestStorage} instance containing details about the clients' names. 
- */ - public static void waitForInstantClientSuccess(TestStorage testStorage) { - waitForInstantClientSuccess(testStorage, true); - } - - /** - * Waits for both the instant producer and consumer clients to succeed, optionally deleting jobs afterward. - * {@link TestStorage#getProducerName()} is used for identifying producer Job and - * {@link TestStorage#getConsumerName()} for identifying consumer Job. - * - * @param testStorage The {@link TestStorage} instance containing details about the clients' names. - * @param deleteAfterSuccess Indicates whether jobs should be deleted after successful completion. - */ - public static void waitForInstantClientSuccess(TestStorage testStorage, boolean deleteAfterSuccess) { - waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount(), deleteAfterSuccess); - } - - /** - * Waits for both the continuous producer and consumer clients to succeed, automatically deleting the associated jobs afterward. - * {@link TestStorage#getContinuousProducerName()} is used for identifying producer Job and - * {@link TestStorage#getContinuousConsumerName()} for identifying consumer Job. The timeout while waiting is directly proportional - * to the number of messages. - * - * @param testStorage The {@link TestStorage} instance containing details about the clients' names. - * @param messageCount The expected number of messages to be transmitted. - */ - public static void waitForContinuousClientSuccess(TestStorage testStorage, int messageCount) { - waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getContinuousConsumerName(), testStorage.getContinuousProducerName(), messageCount, true); - } - - /** - * Waits for both the continuous producer and consumer clients to succeed, with default number of messages expected to be transmitted. 
- * {@link TestStorage#getContinuousProducerName()} is used for identifying producer Job and - * {@link TestStorage#getContinuousConsumerName()} for identifying consumer Job. - * - * @param testStorage The {@link TestStorage} instance containing details about the clients' names. - */ - public static void waitForContinuousClientSuccess(TestStorage testStorage) { - waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getContinuousConsumerName(), testStorage.getContinuousProducerName(), testStorage.getContinuousMessageCount(), true); - } - public static void waitForClientsSuccess(String namespaceName, String consumerName, String producerName, int messageCount) { waitForClientsSuccess(namespaceName, consumerName, producerName, messageCount, true); } @@ -99,38 +48,6 @@ public static void waitForClientsSuccess(String namespaceName, String consumerNa // Client success - /** - * Waits for the instant consumer client to succeed, automatically deleting the associated job afterward. - * {@link TestStorage#getProducerName()} is used for identifying producer Job and - * {@link TestStorage#getConsumerName()} for identifying consumer Job. - * - * @param testStorage The {@link TestStorage} instance containing details about the client's name. - */ - public static void waitForInstantConsumerClientSuccess(TestStorage testStorage) { - waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); - } - - /** - * Waits for the instant producer client to succeed with explicitly specified namespace automatically deleting the associated job afterward. - * - * @param namespaceName Explicit namespace name. - * @param testStorage The {@link TestStorage} instance containing details about the client's name. 
- */ - public static void waitForInstantProducerClientSuccess(String namespaceName, TestStorage testStorage) { - waitForClientSuccess(namespaceName, testStorage.getProducerName(), testStorage.getMessageCount()); - } - - /** - * Waits for the instant producer client to succeed, automatically deleting the associated job afterward. - * {@link TestStorage#getProducerName()} is used for identifying producer Job and - * {@link TestStorage#getConsumerName()} for identifying consumer Job. - * - * @param testStorage The {@link TestStorage} instance containing details about the client's name. - */ - public static void waitForInstantProducerClientSuccess(TestStorage testStorage) { - waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - } - public static void waitForClientSuccess(String namespaceName, String jobName, int messageCount) { waitForClientSuccess(namespaceName, jobName, messageCount, true); } @@ -148,48 +65,6 @@ public static void waitForClientSuccess(String namespaceName, String jobName, in // Client timeouts - /** - * Waits only for instant producer to timeout, automatically deleting the associated job afterward. - * {@link TestStorage#getProducerName()} is used for identifying producer Job. - * - * @param testStorage The {@link TestStorage} instance containing details about the client's name. - */ - public static void waitForInstantProducerClientTimeout(TestStorage testStorage) { - waitForInstantProducerClientTimeout(testStorage, true); - } - - /** - * Waits only for instant producer to timeout, optionally deleting jobs afterward. - * {@link TestStorage#getProducerName()} is used for identifying producer Job and - * - * @param testStorage The {@link TestStorage} instance contains details about client's name. - * @param deleteAfterSuccess Indicates whether producer job should be deleted after timeout. 
- */ - public static void waitForInstantProducerClientTimeout(TestStorage testStorage, boolean deleteAfterSuccess) { - waitForClientTimeout(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount(), deleteAfterSuccess); - } - - /** - * Waits only for instant consumer to timeout, automatically deleting the associated job afterward. - * {@link TestStorage#getConsumerName()} is used for identifying consumer Job. - * - * @param testStorage The {@link TestStorage} instance contains details about client's name. - */ - public static void waitForInstantConsumerClientTimeout(TestStorage testStorage) { - waitForInstantConsumerClientTimeout(testStorage, true); - } - - /** - * Waits only for instant consumer to timeout, automatically deleting the associated job afterward. - * {@link TestStorage#getConsumerName()} is used for identifying consumer Job. - * - * @param testStorage The {@link TestStorage} instance contains details about client's name. - * @param deleteAfterSuccess Indicates whether consumer job should be deleted after timeout. - */ - public static void waitForInstantConsumerClientTimeout(TestStorage testStorage, boolean deleteAfterSuccess) { - waitForClientTimeout(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount(), deleteAfterSuccess); - } - public static void waitForClientTimeout(String namespaceName, String jobName, int messageCount) { waitForClientTimeout(namespaceName, jobName, messageCount, true); } @@ -219,17 +94,6 @@ public static void waitForClientTimeout(String namespaceName, String jobName, in // Both clients timeouts - /** - * Waits only for instant consumer and producer to timeout, automatically deleting the associated jobs afterward. - * {@link TestStorage#getProducerName()} is used for identifying producer Job and - * {@link TestStorage#getConsumerName()} for identifying consumer Job. 
- * - * @param testStorage The {@link TestStorage} instance contains details about client's name. - */ - public static void waitForInstantClientsTimeout(TestStorage testStorage) { - waitForClientsTimeout(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); - } - public static void waitForClientsTimeout(String namespaceName, String consumerName, String producerName, int messageCount) { waitForClientsTimeout(namespaceName, consumerName, producerName, messageCount, true); } @@ -275,241 +139,5 @@ public static String generateRandomConsumerGroup() { return CONSUMER_GROUP_NAME + salt; } - - ////////////////////////////////// - // instant Plain client builders (ScramSha, TLS, plain) - ///////////////////////////////// - - /** - * Creates and configures a {@link KafkaClientsBuilder} instance for instant Kafka clients based on test storage settings. - * This base configuration sets up the namespace, message count, delay, topic name, producer name, and consumer name - * for Kafka clients. {@link TestStorage#getProducerName()} is used for naming producer Job and - * {@link TestStorage#getConsumerName()} for naming consumer Job. Finally, {@link TestStorage#getTopicName()} - * is used as Topic target by attempted message transition. The default message count is set to 100, and the delay in milliseconds - * is set to 0, indicating messages will be sent practically instantly. Returned builder can be modified as desired. - * - * @param testStorage The {@link TestStorage} instance containing configuration details - * - * @return A configured {@link KafkaClientsBuilder} instance ready for further customization or immediate use - * for creating Kafka producer and consumer clients. 
- */ - private static KafkaClientsBuilder instantClientBuilderBase(TestStorage testStorage) { - return new KafkaClientsBuilder() - .withNamespaceName(testStorage.getNamespaceName()) - .withMessageCount(testStorage.getMessageCount()) // default 100 - .withDelayMs(0) - .withTopicName(testStorage.getTopicName()) - .withProducerName(testStorage.getProducerName()) - .withConsumerName(testStorage.getConsumerName()); - } - - // Instant ScramSha client builders - - /** - * Generates a {@link KafkaClientsBuilder} for instant Kafka clients using scram_sha over plain communication. - * {@link TestStorage#getClusterName()} is with port 9092 is used to generate kafka bootstrap address. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @return A configured {@link KafkaClientsBuilder} instance for instant clients with plain communication setup. - */ - public static KafkaClientsBuilder getInstantScramShaOverPlainClientBuilder(TestStorage testStorage) { - return getInstantScramShaClientBuilder(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getClusterName())); - } - - /** - * Generates a {@link KafkaClientsBuilder} for instant Kafka clients using scram_sha over tls communication. - * {@link TestStorage#getClusterName()} is with port 9093 is used to generate kafka bootstrap address. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @return A configured {@link KafkaClientsBuilder} instance for instant clients with tls communication setup. - */ - public static KafkaClientsBuilder getInstantScramShaOverTlsClientBuilder(TestStorage testStorage) { - return getInstantScramShaClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())); - } - - /** - * Generates a {@link KafkaClientsBuilder} for instant Kafka clients using specified bootstrap (plain or TLS). 
- * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @param bootstrapServer is the exact address including port (e.g., source-cluster-kafka-bootstrap:9095) - * @return A configured {@link KafkaClientsBuilder} instance for instant clients with plain communication setup. - */ - public static KafkaClientsBuilder getInstantScramShaClientBuilder(TestStorage testStorage, String bootstrapServer) { - return getInstantPlainClientBuilder(testStorage, bootstrapServer) - .withUsername(testStorage.getUsername()); - } - - // instant Plain client builders - - /** - * Generates a {@link KafkaClientsBuilder} for instant Kafka clients using plain communication (non-TLS). - * {@link TestStorage#getClusterName()} and port 9092 are used to generate kafka bootstrap address. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @return A configured {@link KafkaClientsBuilder} instance for instant clients with plain communication setup. - */ - public static KafkaClientsBuilder getInstantPlainClientBuilder(TestStorage testStorage) { - return getInstantPlainClientBuilder(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getClusterName())); - } - - /** - * Generates a {@link KafkaClientsBuilder} for instant Kafka clients using plain communication (non-TLS), - * extending the base configuration with the Kafka cluster's plain bootstrap address. - * {@link TestStorage#getProducerName()} is used for naming producer Job and - * {@link TestStorage#getConsumerName()} for naming consumer Job. Finally, - * {@link TestStorage#getTopicName()} is used as Topic target by attempted message transition. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. 
- * @param bootstrapServer is the exact address including port (e.g., source-cluster-kafka-bootstrap:9092) - * @return A configured {@link KafkaClientsBuilder} instance for instant clients with plain communication setup. - */ - public static KafkaClientsBuilder getInstantPlainClientBuilder(TestStorage testStorage, String bootstrapServer) { - return instantClientBuilderBase(testStorage) - .withBootstrapAddress(bootstrapServer); - } - - // Instant TLS client builders - - /** - * Generates a {@link KafkaClientsBuilder} for instant Kafka clients using TLS communication. - * {@link TestStorage#getClusterName()} and port 9093 are used to generate kafka bootstrap address. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @return A configured {@link KafkaClientsBuilder} instance for instant clients with TLS communication setup. - */ - public static KafkaClientsBuilder getInstantTlsClientBuilder(TestStorage testStorage) { - return getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())); - } - - /** - * Generates a {@link KafkaClientsBuilder} for instant Kafka clients using TLS communication. - * extending the base configuration with the Kafka cluster's plain bootstrap address. - * {@link TestStorage#getProducerName()} is used for naming producer Job and - * {@link TestStorage#getConsumerName()} for naming consumer Job. Finally, - * {@link TestStorage#getTopicName()} is used as Topic target by attempted message transition. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @param bootstrapServer is the exact address including port (e.g., source-cluster-kafka-bootstrap:9093) - * @return A configured {@link KafkaClientsBuilder} instance for instant clients with plain communication setup. 
- */ - public static KafkaClientsBuilder getInstantTlsClientBuilder(TestStorage testStorage, String bootstrapServer) { - return instantClientBuilderBase(testStorage) - .withUsername(testStorage.getUsername()) - .withBootstrapAddress(bootstrapServer); - } - - //////////////////////////////////////////////////////////// - // (already build) instant clients (utilizing builders above) - ///////////////////////////////////////////////////////////// - - /** - * Retrieves an instance of {@link KafkaClients} for plain communication with scramsha activated. - * Targeting bootstrap address on port 9093 and leveraging the {@code getInstantPlainClientBuilder} - * method for initial configuration. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @return build {@link KafkaClients}. - */ - public static KafkaClients getInstantScramShaOverPlainClients(TestStorage testStorage) { - return getInstantScramShaOverPlainClientBuilder(testStorage).build(); - } - - /** - * Retrieves an instance of {@link KafkaClients} for plain communication with scramsha activated. - * Targeting bootstrap address on port 9093 and leveraging the {@code getInstantTlsClientBuilder} - * method for initial configuration. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @return build {@link KafkaClients}. - */ - public static KafkaClients getInstantScramShaOverTlsClients(TestStorage testStorage) { - return getInstantScramShaOverTlsClientBuilder(testStorage).build(); - } - - /** - * Retrieves an instance of {@link KafkaClients} for plain communication with scramsha activated. - * Leveraging the {@code getInstantPlainClientBuilder} method for initial configuration. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @param bootstrapServer is the exact address including port (e.g., source-cluster-kafka-bootstrap:9096) - * @return build {@link KafkaClients}. 
- */ - public static KafkaClients getInstantScramShaClients(TestStorage testStorage, String bootstrapServer) { - return getInstantScramShaClientBuilder(testStorage, bootstrapServer).build(); - } - - /** - * Retrieves an instance of {@link KafkaClients} for plain communication with Kafka brokers targeting port 9092 and, - * leveraging the {@code getInstantPlainClientBuilder} method for initial configuration. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @return build {@link KafkaClients}. - */ - public static KafkaClients getInstantPlainClients(TestStorage testStorage) { - return getInstantPlainClientBuilder(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getClusterName())).build(); - } - - /** - * Retrieves an instance of {@link KafkaClients} for plain communication with Kafka brokers, - * leveraging the {@code getInstantPlainClientBuilder} method for initial configuration. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @param bootstrapServer is the exact address including port (e.g., source-cluster-kafka-bootstrap:9092) - * @return build {@link KafkaClients}. - */ - public static KafkaClients getInstantPlainClients(TestStorage testStorage, String bootstrapServer) { - return getInstantPlainClientBuilder(testStorage, bootstrapServer).build(); - } - - /** - * Retrieves an instance of {@link KafkaClients} for tls communication with Kafka brokers, targeting port 9093 and - * leveraging the {@code getInstantTlsClientBuilder} method for initial configuration. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @return build {@link KafkaClients}. 
- */ - public static KafkaClients getInstantTlsClients(TestStorage testStorage) { - return getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())).build(); - } - - /** - * Retrieves an instance of {@link KafkaClients} for tls communication with Kafka brokers, - * leveraging the {@code getInstantTlsClientBuilder} method for initial configuration. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @param bootstrapServer is the exact address including port (e.g., source-cluster-kafka-bootstrap:9093) - * @return build {@link KafkaClients}. - */ - public static KafkaClients getInstantTlsClients(TestStorage testStorage, String bootstrapServer) { - return getInstantTlsClientBuilder(testStorage, bootstrapServer).build(); - } - - ////////////////////////////////// - // continuous client builders - ///////////////////////////////// - - /** - * Creates a {@link KafkaClientsBuilder} for continuous Kafka clients using plain (non-TLS) communication, - * configuring it with properties specific to continuous operation scenarios. This includes setting up - * (default 200 messages), a delay between messages (1000 ms) making ideal transition last by default for around 3-4 minutes. - * {@link TestStorage#getContinuousProducerName()} is used for naming producer Job and - * {@link TestStorage#getContinuousConsumerName()} for naming consumer Job. Finally, - * {@link TestStorage#getContinuousTopicName()} is used as Topic target by attempted message transition. - * - * @param testStorage The {@link TestStorage} instance providing necessary configurations. - * @return A configured {@link KafkaClientsBuilder} instance ready for creating Kafka clients for continuous - * operations with plain communication, ready for further customization. 
- */ - public static KafkaClientsBuilder getContinuousPlainClientBuilder(TestStorage testStorage) { - return new KafkaClientsBuilder() - .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) - .withNamespaceName(testStorage.getNamespaceName()) - .withMessageCount(testStorage.getContinuousMessageCount()) // default 200 - .withDelayMs(1000) - .withTopicName(testStorage.getContinuousTopicName()) - .withProducerName(testStorage.getContinuousProducerName()) - .withConsumerName(testStorage.getContinuousConsumerName()); - } - } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java index 9129f1a1998..bef40298ee6 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java @@ -23,9 +23,6 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.ParallelTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClients; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClientsBuilder; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.labels.LabelSelectors; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.operator.ClusterOperatorConfigurationBuilder; @@ -41,6 +38,10 @@ import io.strimzi.systemtest.utils.kafkaUtils.KafkaBridgeUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; +import io.strimzi.testclients.clients.http.HttpProducerConsumer; +import io.strimzi.testclients.clients.http.HttpProducerConsumerBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import 
io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import io.vertx.core.json.JsonArray; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.logging.log4j.LogManager; @@ -77,6 +78,8 @@ class HttpBridgeST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(HttpBridgeST.class); private TestStorage suiteTestStorage; + private KafkaProducerConsumerBuilder kafkaProducerConsumerBuilder; + private HttpProducerConsumerBuilder httpProducerConsumerBuilder; @ParallelTest @TestDoc( @@ -99,26 +102,24 @@ class HttpBridgeST extends AbstractST { void testSendSimpleMessage() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); - final BridgeClients kafkaBridgeClientJob = new BridgeClientsBuilder() + final HttpProducerConsumer httpProducerConsumer = httpProducerConsumerBuilder .withProducerName(testStorage.getProducerName()) - .withBootstrapAddress(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())) - .withComponentName(KafkaBridgeResources.componentName(suiteTestStorage.getClusterName())) .withTopicName(testStorage.getTopicName()) - .withMessageCount(testStorage.getMessageCount()) - .withPort(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) - .withDelayMs(1000) - .withPollInterval(1000) - .withNamespaceName(testStorage.getNamespaceName()) .build(); // Create topic KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); - KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJob.producerStrimziBridge()); + KubeResourceManager.get().createResourceWithWait(httpProducerConsumer.getProducer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage, 
KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName())); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerStrimzi()); + final KafkaProducerConsumer kafkaProducerConsumer = kafkaProducerConsumerBuilder + .withTopicName(testStorage.getTopicName()) + .withConsumerName(testStorage.getConsumerName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); // Checking labels for KafkaBridge @@ -145,24 +146,22 @@ void testReceiveSimpleMessage() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); - final BridgeClients kafkaBridgeClientJob = new BridgeClientsBuilder() + final HttpProducerConsumer httpProducerConsumer = httpProducerConsumerBuilder .withConsumerName(testStorage.getConsumerName()) - .withBootstrapAddress(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())) - .withComponentName(KafkaBridgeResources.componentName(suiteTestStorage.getClusterName())) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) .withTopicName(testStorage.getTopicName()) - .withMessageCount(testStorage.getMessageCount()) - .withPort(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) - .withDelayMs(1000) - .withPollInterval(1000) - .withNamespaceName(testStorage.getNamespaceName()) .build(); // Start receiving messages with bridge - KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJob.consumerStrimziBridge()); + KubeResourceManager.get().createResourceWithWait(httpProducerConsumer.getConsumer().getJob()); // Send messages to Kafka - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage, 
KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName())); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi()); + final KafkaProducerConsumer kafkaProducerConsumer = kafkaProducerConsumerBuilder + .withTopicName(testStorage.getTopicName()) + .withProducerName(testStorage.getProducerName()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @@ -526,5 +525,18 @@ void createClassResources() { .endConsumer() .endSpec() .build()); + + kafkaProducerConsumerBuilder = new KafkaProducerConsumerBuilder() + .withNamespaceName(suiteTestStorage.getNamespaceName()) + .withMessageCount(suiteTestStorage.getMessageCount()) + .withTopicName(suiteTestStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName())); + + httpProducerConsumerBuilder = new HttpProducerConsumerBuilder() + .withHostname(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())) + .withTopicName(suiteTestStorage.getTopicName()) + .withMessageCount(suiteTestStorage.getMessageCount()) + .withPort(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) + .withNamespaceName(Environment.TEST_SUITE_NAMESPACE); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java index 8662c56f100..021796f572a 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java @@ -22,9 +22,7 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.ParallelTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import 
io.strimzi.systemtest.kafkaclients.internalClients.BridgeClients; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClientsBuilder; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.templates.crd.KafkaBridgeTemplates; @@ -33,6 +31,10 @@ import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; import io.strimzi.systemtest.templates.crd.KafkaUserTemplates; import io.strimzi.systemtest.utils.ClientUtils; +import io.strimzi.testclients.clients.http.HttpProducerConsumer; +import io.strimzi.testclients.clients.http.HttpProducerConsumerBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -63,7 +65,8 @@ ) class HttpBridgeScramShaST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(HttpBridgeScramShaST.class); - private BridgeClients kafkaBridgeClientJob; + private HttpProducerConsumerBuilder httpProducerConsumerBuilder; + private KafkaProducerConsumerBuilder kafkaProducerConsumerBuilder; private TestStorage suiteTestStorage; @ParallelTest @@ -85,23 +88,24 @@ class HttpBridgeScramShaST extends AbstractST { void testSendSimpleMessageTlsScramSha() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); - final BridgeClients kafkaBridgeClientJb = new BridgeClientsBuilder(kafkaBridgeClientJob) + final HttpProducerConsumer bridgeProducerConsumer = httpProducerConsumerBuilder .withTopicName(testStorage.getTopicName()) .withProducerName(testStorage.getProducerName()) .build(); + final KafkaProducerConsumer 
producerConsumer = kafkaProducerConsumerBuilder + .withTopicName(testStorage.getTopicName()) + .withConsumerName(testStorage.getConsumerName()) + .build(); + // Create topic KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); - KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJb.producerStrimziBridge()); + KubeResourceManager.get().createResourceWithWait(bridgeProducerConsumer.getProducer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(suiteTestStorage.getClusterName())) - .withUsername(suiteTestStorage.getUsername()) - .build(); - - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerScramShaTlsStrimzi(suiteTestStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(producerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @ParallelTest @@ -121,20 +125,21 @@ void testSendSimpleMessageTlsScramSha() { void testReceiveSimpleMessageTlsScramSha() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); - final BridgeClients kafkaBridgeClientJb = new BridgeClientsBuilder(kafkaBridgeClientJob) + final HttpProducerConsumer bridgeProducerConsumer = httpProducerConsumerBuilder .withTopicName(testStorage.getTopicName()) .withConsumerName(testStorage.getConsumerName()) .build(); KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); - 
KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJb.consumerStrimziBridge()); + KubeResourceManager.get().createResourceWithWait(bridgeProducerConsumer.getConsumer().getJob()); // Send messages to Kafka - KafkaClients kafkaClients = ClientUtils.getInstantScramShaClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(suiteTestStorage.getClusterName())) - .withUsername(suiteTestStorage.getUsername()) + final KafkaProducerConsumer producerConsumer = kafkaProducerConsumerBuilder + .withTopicName(testStorage.getTopicName()) + .withProducerName(testStorage.getProducerName()) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerScramShaTlsStrimzi(suiteTestStorage.getClusterName())); + KubeResourceManager.get().createResourceWithWait(producerConsumer.getProducer().getJob()); ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @@ -202,13 +207,20 @@ void setUp() { .build() ); - kafkaBridgeClientJob = new BridgeClientsBuilder() - .withBootstrapAddress(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())) - .withComponentName(KafkaBridgeResources.componentName(suiteTestStorage.getClusterName())) + kafkaProducerConsumerBuilder = new KafkaProducerConsumerBuilder() + .withNamespaceName(suiteTestStorage.getNamespaceName()) + .withMessageCount(suiteTestStorage.getMessageCount()) + .withTopicName(suiteTestStorage.getTopicName()) + .withProducerName(suiteTestStorage.getProducerName()) + .withConsumerName(suiteTestStorage.getConsumerName()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(suiteTestStorage.getClusterName())) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(suiteTestStorage.getNamespaceName(), suiteTestStorage.getUsername(), suiteTestStorage.getClusterName())); + + httpProducerConsumerBuilder = new HttpProducerConsumerBuilder() + 
.withHostname(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())) .withTopicName(suiteTestStorage.getTopicName()) .withMessageCount(suiteTestStorage.getMessageCount()) .withPort(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) - .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) - .build(); + .withNamespaceName(Environment.TEST_SUITE_NAMESPACE); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeServerTlsST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeServerTlsST.java index 4669156328d..1cddbd21282 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeServerTlsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeServerTlsST.java @@ -17,9 +17,6 @@ import io.strimzi.systemtest.Environment; import io.strimzi.systemtest.annotations.ParallelTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClients; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClientsBuilder; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.templates.crd.KafkaBridgeTemplates; @@ -28,6 +25,10 @@ import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; import io.strimzi.systemtest.templates.crd.KafkaUserTemplates; import io.strimzi.systemtest.utils.ClientUtils; +import io.strimzi.testclients.clients.http.HttpProducerConsumer; +import io.strimzi.testclients.clients.http.HttpProducerConsumerBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -55,7 +56,8 @@ ) class HttpBridgeServerTlsST 
extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(HttpBridgeServerTlsST.class); - private BridgeClients kafkaBridgeClientJob; + private HttpProducerConsumerBuilder httpProducerConsumerBuilder; + private KafkaProducerConsumerBuilder kafkaProducerConsumerBuilder; private TestStorage suiteTestStorage; @ParallelTest @@ -76,18 +78,23 @@ class HttpBridgeServerTlsST extends AbstractST { void testSendSimpleMessageTls() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); - BridgeClients kafkaBridgeClientJobProduce = new BridgeClientsBuilder(kafkaBridgeClientJob) + HttpProducerConsumer bridgeProducerConsumer = httpProducerConsumerBuilder .withTopicName(testStorage.getTopicName()) .withProducerName(testStorage.getProducerName()) .build(); KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); - KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJobProduce.producerTlsStrimziBridge(suiteTestStorage.getClusterName())); + KubeResourceManager.get().createResourceWithWait(bridgeProducerConsumer.getProducer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName())); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerStrimzi()); + final KafkaProducerConsumer producerConsumer = kafkaProducerConsumerBuilder + .withConsumerName(testStorage.getConsumerName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .build(); + + KubeResourceManager.get().createResourceWithWait(producerConsumer.getConsumer().getJob()); 
ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @@ -110,19 +117,24 @@ void testSendSimpleMessageTls() { void testReceiveSimpleMessageTls() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); - BridgeClients kafkaBridgeClientJobConsume = new BridgeClientsBuilder(kafkaBridgeClientJob) + HttpProducerConsumer bridgeProducerConsumer = httpProducerConsumerBuilder .withTopicName(testStorage.getTopicName()) .withConsumerName(testStorage.getConsumerName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) .build(); KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); // Start receiving messages with bridge - KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJobConsume.consumerTlsStrimziBridge(suiteTestStorage.getClusterName())); + KubeResourceManager.get().createResourceWithWait(bridgeProducerConsumer.getConsumer().getJob()); // Send messages to Kafka - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName())); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi()); + final KafkaProducerConsumer producerConsumer = kafkaProducerConsumerBuilder + .withProducerName(testStorage.getProducerName()) + .withTopicName(testStorage.getTopicName()) + .build(); + + KubeResourceManager.get().createResourceWithWait(producerConsumer.getProducer().getJob()); ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @@ -139,8 +151,8 @@ void setUp() { LOGGER.info("Deploying Kafka and KafkaBridge before tests"); KubeResourceManager.get().createResourceWithWait( - 
KafkaNodePoolTemplates.brokerPoolPersistentStorage(suiteTestStorage.getNamespaceName(), suiteTestStorage.getBrokerPoolName(), suiteTestStorage.getClusterName(), 1).build(), - KafkaNodePoolTemplates.controllerPoolPersistentStorage(suiteTestStorage.getNamespaceName(), suiteTestStorage.getControllerPoolName(), suiteTestStorage.getClusterName(), 1).build() + KafkaNodePoolTemplates.brokerPoolPersistentStorage(suiteTestStorage.getNamespaceName(), suiteTestStorage.getBrokerPoolName(), suiteTestStorage.getClusterName(), 1).build(), + KafkaNodePoolTemplates.controllerPoolPersistentStorage(suiteTestStorage.getNamespaceName(), suiteTestStorage.getControllerPoolName(), suiteTestStorage.getClusterName(), 1).build() ); KubeResourceManager.get().createResourceWithWait(KafkaTemplates.kafka(suiteTestStorage.getNamespaceName(), suiteTestStorage.getClusterName(), 1).build()); @@ -151,31 +163,36 @@ void setUp() { // Deploy http bridge KubeResourceManager.get().createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(suiteTestStorage.getNamespaceName(), suiteTestStorage.getClusterName(), - KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1) - .editSpec() - .withNewConsumer() - .addToConfig(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") - .endConsumer() - .withNewHttp() - .withPort(8443) - .withNewTls() - .withNewCertificateAndKey() - .withSecretName(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())) - .withCertificate("user.crt") - .withKey("user.key") - .endCertificateAndKey() - .endTls() - .endHttp() - .endSpec() - .build()); - - kafkaBridgeClientJob = new BridgeClientsBuilder() - .withBootstrapAddress(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())) - .withComponentName(KafkaBridgeResources.componentName(suiteTestStorage.getClusterName())) + KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1) + .editSpec() + .withNewConsumer() + .addToConfig(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") 
+ .endConsumer() + .withNewHttp() + .withPort(8443) + .withNewTls() + .withNewCertificateAndKey() + .withSecretName(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())) + .withCertificate("user.crt") + .withKey("user.key") + .endCertificateAndKey() + .endTls() + .endHttp() + .endSpec() + .build()); + + kafkaProducerConsumerBuilder = new KafkaProducerConsumerBuilder() + .withNamespaceName(suiteTestStorage.getNamespaceName()) + .withMessageCount(suiteTestStorage.getMessageCount()) + .withTopicName(suiteTestStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName())); + + httpProducerConsumerBuilder = new HttpProducerConsumerBuilder() + .withHostname(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())) .withTopicName(suiteTestStorage.getTopicName()) .withMessageCount(suiteTestStorage.getMessageCount()) .withPort(8443) .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) - .build(); + .withSslTruststoreCertificate(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java index a9b0e8810e0..14b1a93463f 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java @@ -28,9 +28,7 @@ import io.strimzi.systemtest.TestTags; import io.strimzi.systemtest.annotations.ParallelTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClients; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClientsBuilder; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import 
io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.templates.crd.KafkaBridgeTemplates; @@ -39,6 +37,10 @@ import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; import io.strimzi.systemtest.templates.crd.KafkaUserTemplates; import io.strimzi.systemtest.utils.ClientUtils; +import io.strimzi.testclients.clients.http.HttpProducerConsumer; +import io.strimzi.testclients.clients.http.HttpProducerConsumerBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -66,7 +68,9 @@ ) class HttpBridgeTlsST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(HttpBridgeTlsST.class); - private BridgeClients kafkaBridgeClientJob; + + private HttpProducerConsumerBuilder httpProducerConsumerBuilder; + private KafkaProducerConsumerBuilder kafkaProducerConsumerBuilder; private TestStorage suiteTestStorage; @ParallelTest @@ -87,21 +91,23 @@ class HttpBridgeTlsST extends AbstractST { void testSendSimpleMessageTls() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); - BridgeClients kafkaBridgeClientJobProduce = new BridgeClientsBuilder(kafkaBridgeClientJob) + final HttpProducerConsumer httpProducerConsumer = httpProducerConsumerBuilder .withTopicName(testStorage.getTopicName()) .withProducerName(testStorage.getProducerName()) .build(); KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); - KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJobProduce.producerStrimziBridge()); + KubeResourceManager.get().createResourceWithWait(httpProducerConsumer.getProducer().getJob()); 
ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(suiteTestStorage.getClusterName())) - .withUsername(suiteTestStorage.getUsername()) + final KafkaProducerConsumer kafkaProducerConsumer = kafkaProducerConsumerBuilder + .withConsumerName(testStorage.getConsumerName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withTopicName(testStorage.getTopicName()) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(suiteTestStorage.getClusterName())); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @@ -124,21 +130,23 @@ void testSendSimpleMessageTls() { void testReceiveSimpleMessageTls() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); - BridgeClients kafkaBridgeClientJobConsume = new BridgeClientsBuilder(kafkaBridgeClientJob) + final HttpProducerConsumer httpProducerConsumer = httpProducerConsumerBuilder .withTopicName(testStorage.getTopicName()) .withConsumerName(testStorage.getConsumerName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) .build(); KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); - KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJobConsume.consumerStrimziBridge()); + KubeResourceManager.get().createResourceWithWait(httpProducerConsumer.getConsumer().getJob()); // Send messages to Kafka - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, 
KafkaResources.tlsBootstrapAddress(suiteTestStorage.getClusterName())) - .withUsername(suiteTestStorage.getUsername()) + final KafkaProducerConsumer kafkaProducerConsumer = kafkaProducerConsumerBuilder + .withProducerName(testStorage.getProducerName()) + .withTopicName(testStorage.getTopicName()) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(suiteTestStorage.getClusterName())); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @@ -222,15 +230,15 @@ private void testWeirdUsername(String weirdUserName, KafkaListenerAuthentication .endSpec() .build()); - BridgeClients kafkaBridgeClientJob = new BridgeClientsBuilder() + HttpProducerConsumer httpProducerConsumer = new HttpProducerConsumerBuilder() .withProducerName(bridgeProducerName) .withConsumerName(bridgeConsumerName) - .withBootstrapAddress(KafkaBridgeResources.serviceName(testStorage.getClusterName())) - .withComponentName(KafkaBridgeResources.componentName(testStorage.getClusterName())) + .withHostname(KafkaBridgeResources.serviceName(testStorage.getClusterName())) .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) .withPort(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) .withNamespaceName(testStorage.getNamespaceName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) .build(); // Create topic @@ -254,20 +262,28 @@ private void testWeirdUsername(String weirdUserName, KafkaListenerAuthentication .endSpec() .build()); - KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJob.consumerStrimziBridge()); + KubeResourceManager.get().createResourceWithWait(httpProducerConsumer.getConsumer().getJob()); - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage) - 
.withUsername(weirdUserName) - .build(); + final KafkaProducerConsumerBuilder producerConsumerBuilder = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withMessageCount(testStorage.getMessageCount()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())); if (auth.getType().equals(TestConstants.TLS_LISTENER_DEFAULT_NAME)) { // tls producer - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); + producerConsumerBuilder + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), weirdUserName)); } else { // scram-sha producer - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerScramShaTlsStrimzi(testStorage.getClusterName())); + producerConsumerBuilder + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), weirdUserName, testStorage.getClusterName())); } + KubeResourceManager.get().createResourceWithWait(producerConsumerBuilder.build().getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), bridgeConsumerName, testStorage.getMessageCount()); @@ -331,13 +347,18 @@ void setUp() { .endSpec() .build()); - kafkaBridgeClientJob = new BridgeClientsBuilder() - .withBootstrapAddress(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())) - .withComponentName(KafkaBridgeResources.componentName(suiteTestStorage.getClusterName())) + kafkaProducerConsumerBuilder = new KafkaProducerConsumerBuilder() + .withNamespaceName(suiteTestStorage.getNamespaceName()) + .withMessageCount(suiteTestStorage.getMessageCount()) + 
.withTopicName(suiteTestStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(suiteTestStorage.getClusterName())) + .withAuthentication(ClientsAuthentication.configureTls(suiteTestStorage.getClusterName(), suiteTestStorage.getUsername())); + + httpProducerConsumerBuilder = new HttpProducerConsumerBuilder() + .withHostname(KafkaBridgeResources.serviceName(suiteTestStorage.getClusterName())) .withTopicName(suiteTestStorage.getTopicName()) .withMessageCount(suiteTestStorage.getMessageCount()) .withPort(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) - .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) - .build(); + .withNamespaceName(Environment.TEST_SUITE_NAMESPACE); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java index feff43074c9..668dce1340b 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java @@ -35,7 +35,6 @@ import io.strimzi.systemtest.annotations.OpenShiftOnly; import io.strimzi.systemtest.annotations.ParallelTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.operator.ClusterOperatorConfigurationBuilder; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -52,6 +51,10 @@ import io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClient; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerClient; +import 
io.strimzi.testclients.clients.kafka.KafkaProducerClientBuilder;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.junit.jupiter.api.BeforeAll;
@@ -295,9 +298,16 @@ void testBuildWithJarTgzAndZip() {
         KafkaConnector kafkaConnector = CrdClients.kafkaConnectorClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getClusterName()).get();
         assertThat(kafkaConnector.getSpec().getClassName(), is(TestConstants.ECHO_SINK_CLASS_NAME));

-        final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()));
-        KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi());
-        ClientUtils.waitForInstantProducerClientSuccess(testStorage);
+        KafkaProducerClient kafkaProducerClient = new KafkaProducerClientBuilder()
+            .withName(testStorage.getProducerName())
+            .withBootstrapAddress(KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()))
+            .withMessageCount(testStorage.getMessageCount())
+            .withNamespaceName(testStorage.getNamespaceName())
+            .withTopicName(testStorage.getTopicName())
+            .build();
+
+        KubeResourceManager.get().createResourceWithWait(kafkaProducerClient.getJob());
+        ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount());

         String connectPodName = PodUtils.listPodNames(testStorage.getNamespaceName(), testStorage.getKafkaConnectSelector()).get(0);
         PodUtils.waitUntilMessageIsInPodLogs(testStorage.getNamespaceName(), connectPodName, "Received message with key 'null' and value 'Hello-world - 99'");
@@ -583,9 +593,17 @@ void testBuildPluginUsingMavenCoordinatesArtifacts() {
             .endSpec()
             .build());

-        final KafkaClients kafkaClient = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()));
-        
KubeResourceManager.get().createResourceWithWait(kafkaClient.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KafkaConsumerClient kafkaConsumerClient = new KafkaConsumerClientBuilder() + .withName(testStorage.getConsumerName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaConsumerClient.getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } private String getPluginFileNameFromConnectPod(final String namespaceName, final String connectPodName) { diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java index 9dd421cdccc..1ebe21ae3f7 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectST.java @@ -53,8 +53,7 @@ import io.strimzi.systemtest.annotations.MicroShiftNotSupported; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.operator.ClusterOperatorConfigurationBuilder; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -77,6 +76,12 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; import 
io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.test.TestUtils; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClient; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerClient; +import io.strimzi.testclients.clients.kafka.KafkaProducerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import io.vertx.core.json.JsonObject; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -327,11 +332,23 @@ void testKafkaConnectWithPlainAndScramShaAuthentication() { KafkaConnectorUtils.createFileSinkConnector(testStorage.getNamespaceName(), scraperPodName, testStorage.getTopicName(), TestConstants.DEFAULT_SINK_FILE_PATH, KafkaConnectResources.url(testStorage.getClusterName(), testStorage.getNamespaceName(), 8083)); - final KafkaClients plainScramShaClients = ClientUtils.getInstantScramShaOverPlainClients(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withAuthentication(ClientsAuthentication.configurePlainScramSha(testStorage.getNamespaceName(), testStorage.getUsername())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + KubeResourceManager.get().createResourceWithWait( - plainScramShaClients.producerScramShaPlainStrimzi(), - plainScramShaClients.consumerScramShaPlainStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + kafkaProducerConsumer.getProducer().getJob(), + 
kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(testStorage.getNamespaceName(), kafkaConnectPodName, TestConstants.DEFAULT_SINK_FILE_PATH, testStorage.getMessageCount()); } @@ -388,9 +405,19 @@ void testKafkaConnectAndConnectorFileSinkPlugin() { final String scraperPodName = KubeResourceManager.get().kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), testStorage.getScraperName()).get(0).getMetadata().getName(); - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + final KafkaConsumerClient kafkaConsumerClient = new KafkaConsumerClientBuilder() + .withName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaConsumerClient.getJob() + ); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); String service = KafkaConnectResources.url(testStorage.getClusterName(), testStorage.getNamespaceName(), 8083); String output = KubeResourceManager.get().kubeCmdClient().inNamespace(testStorage.getNamespaceName()).execInPod(scraperPodName, "/bin/bash", "-c", "curl " + service + "/connectors/" + connectorName).out(); @@ -589,9 +616,23 @@ void testSecretsWithKafkaConnectWithTlsAndTlsClientAuthentication() { 
KafkaConnectorUtils.createFileSinkConnector(testStorage.getNamespaceName(), scraperPodName, testStorage.getTopicName(), TestConstants.DEFAULT_SINK_FILE_PATH, KafkaConnectResources.url(testStorage.getClusterName(), testStorage.getNamespaceName(), 8083)); - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(testStorage.getNamespaceName(), kafkaConnectPodName, TestConstants.DEFAULT_SINK_FILE_PATH, testStorage.getMessageCount()); } @@ -678,9 +719,23 @@ void testSecretsWithKafkaConnectWithTlsAndScramShaAuthentication() { KafkaConnectorUtils.createFileSinkConnector(testStorage.getNamespaceName(), scraperPodName, testStorage.getTopicName(), TestConstants.DEFAULT_SINK_FILE_PATH, KafkaConnectResources.url(testStorage.getClusterName(), 
testStorage.getNamespaceName(), 8083)); - final KafkaClients kafkaClients = ClientUtils.getInstantScramShaOverTlsClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerScramShaTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerScramShaTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getUsername(), testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(testStorage.getNamespaceName(), kafkaConnectPodName, TestConstants.DEFAULT_SINK_FILE_PATH, testStorage.getMessageCount()); } @@ -772,19 +827,24 @@ void testConnectorTaskAutoRestart() { .build()); // Send first batch of messages to the topic - KafkaClients kafkaClients = ClientUtils.getInstantPlainClientBuilder(testStorage) + KafkaProducerClient kafkaProducerClient = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + 
.withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName()))
             .withMessageCount(firstBatchMessageCount)
             .build();
-        KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi());
-        ClientUtils.waitForInstantProducerClientSuccess(testStorage);
+
+        KubeResourceManager.get().createResourceWithWait(kafkaProducerClient.getJob());
+        ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), firstBatchMessageCount);

         // Send second batch of messages to the topic
-        kafkaClients = new KafkaClientsBuilder(kafkaClients)
+        kafkaProducerClient = new KafkaProducerClientBuilder(kafkaProducerClient)
             .withMessageCount(secondBatchMessageCount)
             .build();

-        KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi());
-        ClientUtils.waitForInstantProducerClientSuccess(testStorage);
+        KubeResourceManager.get().createResourceWithWait(kafkaProducerClient.getJob());
+        ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), secondBatchMessageCount);

         // After connector picks up messages from topic it fails task
         // If it's the first time echo-sink task failed - it's immediately restarted and connector adds count to autoRestartCount.
@@ -963,9 +1023,22 @@ void testMultiNodeKafkaConnectWithConnectorCreation() { String workerNode = connectStatus.getJsonObject("connector").getString("worker_id").split(":")[0]; String connectorPodName = workerNode.substring(0, workerNode.indexOf(".")); - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(testStorage.getNamespaceName(), connectorPodName, TestConstants.DEFAULT_SINK_FILE_PATH, testStorage.getMessageCount()); } @@ -1045,13 +1118,18 @@ void testConnectTlsAuthWithWeirdUserName() { .endSpec() .build()); - final KafkaClients clients = ClientUtils.getInstantTlsClientBuilder(testStorage) - .withUsername(weirdUserName) + KafkaProducerClient kafkaProducerClient = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + 
.withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), weirdUserName)) .build(); LOGGER.info("Checking if user is able to produce messages"); - KubeResourceManager.get().createResourceWithWait(clients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerClient.getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Checking if connector is able to consume messages"); final String connectorPodName = KubeResourceManager.get().kubeClient().listPods(testStorage.getNamespaceName(), testStorage.getKafkaConnectSelector()).get(0).getMetadata().getName(); @@ -1136,13 +1214,18 @@ void testConnectScramShaAuthWithWeirdUserName() { .endSpec() .build()); - final KafkaClients clients = ClientUtils.getInstantScramShaOverTlsClientBuilder(testStorage) - .withUsername(weirdUserName) + KafkaProducerClient kafkaProducerClient = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), weirdUserName, testStorage.getClusterName())) .build(); - LOGGER.info("Checking if user is able to send messages"); - KubeResourceManager.get().createResourceWithWait(clients.producerScramShaTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + LOGGER.info("Checking if user is able to produce messages"); + 
KubeResourceManager.get().createResourceWithWait(kafkaProducerClient.getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Checking if connector is able to consume messages"); final String connectorPodName = KubeResourceManager.get().kubeClient().listPods(testStorage.getNamespaceName(), testStorage.getKafkaConnectSelector()).get(0).getMetadata().getName(); @@ -1775,9 +1858,20 @@ void testConnectorOffsetManagement() throws JsonProcessingException { NetworkPolicyUtils.deployNetworkPolicyForResource(connect, KafkaConnectResources.componentName(testStorage.getClusterName())); final String scraperPodName = PodUtils.getPodsByPrefixInNameWithDynamicWait(testStorage.getNamespaceName(), testStorage.getScraperName()).get(0).getMetadata().getName(); - KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); KafkaConnectorUtils.waitForOffsetInFileSinkConnector(testStorage.getNamespaceName(), scraperPodName, testStorage.getClusterName(), testStorage.getClusterName(), TestConstants.MESSAGE_COUNT); @@ -1871,9 +1965,22 @@ void verifySinkConnectorByBlockAndUnblock(TestStorage testStorage, String kafkaC 
// messages need to be produced for each run (although there are more messages in topic eventually, sink will not copy messages it copied previously (before clearing them) LOGGER.info("Producing new messages which are to be watched KafkaConnector once it is resumed"); - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Because KafkaConnector is blocked, no messages should appear to FileSink file"); assertThrows(Exception.class, () -> KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(testStorage.getNamespaceName(), kafkaConnectPodName, TestConstants.DEFAULT_SINK_FILE_PATH, testStorage.getMessageCount())); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java index aadf72d953b..7e5a6f80818 100644 --- 
a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlConfigurationST.java @@ -26,7 +26,6 @@ import io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates; import io.strimzi.systemtest.templates.crd.KafkaTemplates; -import io.strimzi.systemtest.templates.specific.AdminClientTemplates; import io.strimzi.systemtest.utils.AdminClientUtils; import io.strimzi.systemtest.utils.RollingUpdateUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils; @@ -34,6 +33,8 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.systemtest.utils.specific.CruiseControlUtils; import io.strimzi.test.WaitException; +import io.strimzi.testclients.clients.kafka.KafkaAdminClient; +import io.strimzi.testclients.clients.kafka.KafkaAdminClientBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -125,13 +126,13 @@ void testDeployAndUnDeployCruiseControl() throws IOException { LOGGER.info("Verifying that there is no configuration to CruiseControl metric reporter in Kafka ConfigMap"); assertThrows(WaitException.class, () -> CruiseControlUtils.verifyCruiseControlMetricReporterConfigurationInKafkaConfigMapIsPresent(testStorage.getClusterName(), testStorage.getNamespaceName(), brokerPodName)); - KubeResourceManager.get().createResourceWithWait( - AdminClientTemplates.plainAdminClient( - testStorage.getNamespaceName(), - testStorage.getAdminName(), - KafkaResources.plainBootstrapAddress(testStorage.getClusterName()) - ).build() - ); + final KafkaAdminClient kafkaAdminClient = new KafkaAdminClientBuilder() + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withName(testStorage.getAdminName()) + .withNamespaceName(testStorage.getNamespaceName()) + .build(); + + 
KubeResourceManager.get().createResourceWithWait(kafkaAdminClient.getDeployment()); final AdminClient adminClient = AdminClientUtils.getConfiguredAdminClient(testStorage.getNamespaceName(), testStorage.getAdminName()); LOGGER.info("Cruise Control Topics will not be deleted and will stay in the Kafka cluster"); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java index d4120b7914e..bcaa4a15ed1 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java @@ -36,7 +36,6 @@ import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.kafkaclients.internalClients.admin.AdminClient; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -45,7 +44,6 @@ import io.strimzi.systemtest.templates.crd.KafkaRebalanceTemplates; import io.strimzi.systemtest.templates.crd.KafkaTemplates; import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; -import io.strimzi.systemtest.templates.specific.AdminClientTemplates; import io.strimzi.systemtest.templates.specific.ScraperTemplates; import io.strimzi.systemtest.utils.AdminClientUtils; import io.strimzi.systemtest.utils.ClientUtils; @@ -59,6 +57,10 @@ import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.systemtest.utils.specific.CruiseControlUtils; +import io.strimzi.testclients.clients.kafka.KafkaAdminClient; +import io.strimzi.testclients.clients.kafka.KafkaAdminClientBuilder; +import 
io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -150,13 +152,13 @@ void testAutoCreationOfCruiseControlTopicsWithResources() { "-Xmx200M", "-Xms128M", "-XX:+UseG1GC"); KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage).build()); - KubeResourceManager.get().createResourceWithWait( - AdminClientTemplates.plainAdminClient( - testStorage.getNamespaceName(), - testStorage.getAdminName(), - KafkaResources.plainBootstrapAddress(testStorage.getClusterName()) - ).build() - ); + final KafkaAdminClient kafkaAdminClient = new KafkaAdminClientBuilder() + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withName(testStorage.getAdminName()) + .withNamespaceName(testStorage.getNamespaceName()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaAdminClient.getDeployment()); final AdminClient adminClient = AdminClientUtils.getConfiguredAdminClient(testStorage.getNamespaceName(), testStorage.getAdminName()); CruiseControlUtils.verifyThatCruiseControlTopicsArePresent(adminClient, defaultBrokerReplicaCount); } @@ -823,9 +825,22 @@ void testCruiseControlRemoveDisksMode() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName(), 24, 3).build()); // send some data to topic - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClientBuilder(testStorage).withMessageCount(1000).build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + 
.withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(1000) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); final List brokersWithRemovedVolumes = Arrays.asList( new BrokerAndVolumeIdsBuilder().withBrokerId(0).withVolumeIds(Arrays.asList(1, 2)).build(), diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java index 464307bfa53..daec32c5aee 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/ConfigProviderST.java @@ -17,11 +17,11 @@ import io.skodjob.annotations.SuiteDoc; import io.skodjob.annotations.TestDoc; import io.skodjob.kubetest4j.resources.KubeResourceManager; +import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.operator.common.Annotations; import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.templates.crd.KafkaConnectTemplates; @@ -30,6 +30,8 @@ import io.strimzi.systemtest.templates.crd.KafkaTemplates; import 
io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaConnectUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -181,8 +183,18 @@ void testConnectWithConnectorUsingConfigAndEnvProvider() { .endSpec() .build()); - final KafkaClients kafkaBasicClientJob = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaBasicClientJob.producerStrimzi()); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); String kafkaConnectPodName = KubeResourceManager.get().kubeClient().listPods(testStorage.getNamespaceName(), testStorage.getKafkaConnectSelector()).get(0).getMetadata().getName(); KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(testStorage.getNamespaceName(), kafkaConnectPodName, customFileSinkPath, testStorage.getMessageCount()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java index 9e8561cf6c2..8b7ab5a92e9 100644 --- 
a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaNodePoolST.java @@ -12,13 +12,13 @@ import io.skodjob.annotations.SuiteDoc; import io.skodjob.annotations.TestDoc; import io.skodjob.kubetest4j.resources.KubeResourceManager; +import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.nodepool.ProcessRoles; import io.strimzi.api.kafka.model.topic.KafkaTopic; import io.strimzi.operator.common.Annotations; import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.labels.LabelSelectors; import io.strimzi.systemtest.resources.crd.KafkaComponents; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -33,6 +33,8 @@ import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.StrimziPodSetUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -312,11 +314,22 @@ private void transmitMessagesWithNewTopicAndClean(TestStorage testStorage, int t KubeResourceManager.get().createResourceWithWait(kafkaTopic); LOGGER.info("Transmit messages with Kafka {}/{} using topic {}", testStorage.getNamespaceName(), testStorage.getClusterName(), topicName); - KafkaClients kafkaClients = ClientUtils.getInstantPlainClientBuilder(testStorage) + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) 
+ .withNamespaceName(testStorage.getNamespaceName()) .withTopicName(topicName) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // clean topic KubeResourceManager.get().deleteResourceWithWait(kafkaTopic); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java index 918eda77964..13ff5aac634 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaST.java @@ -55,7 +55,6 @@ import io.strimzi.systemtest.annotations.MultiNodeClusterOnly; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.crd.KafkaComponents; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -80,6 +79,8 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.SecretUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.ServiceUtils; import io.strimzi.test.TestUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import 
org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -690,9 +691,22 @@ void testLabelsExistenceAndManipulation() { assertThat(pvc.getMetadata().getAnnotations().get(pvcLabelOrAnnotationKey), is(pvcLabelOrAnnotationValue)); } - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("--> Test Customer specific labels manipulation (add, update) of Kafka CR and (update) PVC <--"); @@ -796,8 +810,11 @@ void testLabelsExistenceAndManipulation() { } LOGGER.info("Produce and Consume messages to make sure Kafka cluster is not broken by labels and annotations manipulation"); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + 
); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -864,9 +881,22 @@ void testMessagesAndConsumerOffsetFilesOnDisk() { LOGGER.info("Topic: {} is present in Kafka Broker: {} with no data", testStorage.getTopicName(), brokerPodName); assertThat("Topic contains data", topicData, emptyOrNullString()); - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Verifying presence of files created to store offsets Topic"); String commandToGetFiles = "cd /var/lib/kafka/data/kafka-log0/; ls -l | grep __consumer_offsets | wc -l"; @@ -974,9 +1004,22 @@ void testReadOnlyRootFileSystem() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage).build()); - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClientBuilder(testStorage).build(); - 
KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -1073,17 +1116,38 @@ void testResizeJbodVolumes() { producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + testStorage.getContinuousTopicName() + ".1"); producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true"); - KafkaClients kafkaBasicClientJob = ClientUtils.getContinuousPlainClientBuilder(testStorage) + final KafkaProducerConsumer continuousKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getContinuousProducerName()) + .withConsumerName(testStorage.getContinuousConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getContinuousTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) .withMessageCount(continuousClientsMessageCount) + .withDelayMs(1000) 
.withAdditionalConfig(producerAdditionConfiguration) + .withAcks("all") .build(); - KubeResourceManager.get().createResourceWithWait(kafkaBasicClientJob.producerStrimzi(), kafkaBasicClientJob.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait( + continuousKafkaProducerConsumer.getProducer().getJob(), + continuousKafkaProducerConsumer.getConsumer().getJob() + ); // ############################## - KafkaClients clients = ClientUtils.getInstantPlainClientBuilder(testStorage).build(); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi()); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); // Replace Jbod to bigger one volume to Kafka => triggers RU LOGGER.info("Replace JBOD to bigger one volume to the Kafka cluster {}", testStorage.getBrokerComponentName()); @@ -1109,13 +1173,13 @@ void testResizeJbodVolumes() { "data-" + vol0.getId() + "-" + testStorage.getClusterName() + "-", vol0.getSize()); - KubeResourceManager.get().createResourceWithWait(clients.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + 
ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); // ############################## // Validate that continuous clients finished successfully // ############################## - ClientUtils.waitForContinuousClientSuccess(testStorage, continuousClientsMessageCount); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getContinuousConsumerName(), testStorage.getContinuousProducerName(), continuousClientsMessageCount); // ############################## } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java index 3ebe1894a57..617cdd4f878 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/KafkaVersionsST.java @@ -10,6 +10,7 @@ import io.skodjob.annotations.SuiteDoc; import io.skodjob.annotations.TestDoc; import io.skodjob.kubetest4j.resources.KubeResourceManager; +import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; import io.strimzi.api.kafka.model.user.KafkaUser; @@ -17,7 +18,7 @@ import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates; @@ -26,6 +27,12 @@ import io.strimzi.systemtest.templates.crd.KafkaUserTemplates; import io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.TestKafkaVersion; 
+import io.strimzi.testclients.clients.kafka.KafkaConsumerClient; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerClient; +import io.strimzi.testclients.clients.kafka.KafkaProducerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -168,33 +175,51 @@ void testKafkaWithVersion(final TestKafkaVersion testKafkaVersion) { ); LOGGER.info("Sending and receiving messages via PLAIN -> SCRAM-SHA"); - final KafkaClients kafkaClientsPlainScramShaWrite = ClientUtils.getInstantScramShaOverPlainClientBuilder(testStorage) - .withUsername(kafkaUserWrite) + + final KafkaProducerClient kafkaProducerPlainScramShaWrite = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configurePlainScramSha(testStorage.getNamespaceName(), kafkaUserWrite)) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClientsPlainScramShaWrite.producerScramShaPlainStrimzi()); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerPlainScramShaWrite.getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - final KafkaClients kafkaClientsPlainScramShaRead = ClientUtils.getInstantScramShaOverPlainClientBuilder(testStorage) + final KafkaConsumerClient kafkaConsumerPlainScramShaRead = new KafkaConsumerClientBuilder() + 
.withName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) .withConsumerGroup(readConsumerGroup) - .withUsername(kafkaUserRead) + .withAuthentication(ClientsAuthentication.configurePlainScramSha(testStorage.getNamespaceName(), kafkaUserRead)) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClientsPlainScramShaRead.consumerScramShaPlainStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaConsumerPlainScramShaRead.getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); LOGGER.info("Sending and receiving messages via TLS"); - final KafkaClients kafkaClientsTlsScramShaRead = ClientUtils.getInstantTlsClientBuilder(testStorage) + final KafkaProducerConsumer kafkaProducerConsumerTls = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) .withConsumerGroup(readConsumerGroup) - .withUsername(kafkaUserReadWriteTls) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), kafkaUserReadWriteTls)) .build(); KubeResourceManager.get().createResourceWithWait( - kafkaClientsTlsScramShaRead.producerTlsStrimzi(testStorage.getClusterName()), - kafkaClientsTlsScramShaRead.consumerTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumerTls.getProducer().getJob(), + kafkaProducerConsumerTls.getConsumer().getJob() ); - 
ClientUtils.waitForInstantClientSuccess(testStorage); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @BeforeAll diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java index 789b7c7722d..86502c898f0 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/QuotasST.java @@ -18,7 +18,7 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates; @@ -28,6 +28,8 @@ import io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.StUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.JobUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerClient; +import io.strimzi.testclients.clients.kafka.KafkaProducerClientBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.hamcrest.Matchers; @@ -116,14 +118,19 @@ void testKafkaQuotasPluginIntegration() { KafkaUserTemplates.scramShaUser(testStorage).build() ); - KafkaClients clients = ClientUtils.getInstantPlainClientBuilder(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + final KafkaProducerClient kafkaProducerClient = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + 
.withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) .withMessageCount(100000) + .withDelayMs(0) .withMessage(String.join("", Collections.nCopies(10000, "#"))) - .withAdditionalConfig("delivery.timeout.ms=10000\nrequest.timeout.ms=10000\n") + .withAdditionalConfig("delivery.timeout.ms=10005\nrequest.timeout.ms=10000\nlinger.ms=5") .build(); LOGGER.info("Sending messages without any user, we should hit the quota"); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerClient.getJob()); // Kafka Quotas Plugin should stop producer after it reaches the minimum available bytes JobUtils.waitForJobContainingLogMessage(testStorage.getNamespaceName(), testStorage.getProducerName(), "Failed to send messages"); JobUtils.deleteJobWithWait(testStorage.getNamespaceName(), testStorage.getProducerName()); @@ -137,10 +144,11 @@ void testKafkaQuotasPluginIntegration() { LOGGER.info("Sending messages with user that is specified in list of excluded principals, we should be able to send the messages without problem"); - clients = ClientUtils.getInstantScramShaClientBuilder(testStorage, KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9095").build(); + kafkaProducerClient.setBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9095"); + kafkaProducerClient.setAuthentication(ClientsAuthentication.configurePlainScramSha(testStorage.getNamespaceName(), testStorage.getUsername())); - KubeResourceManager.get().createResourceWithWait(clients.producerScramShaPlainStrimzi()); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerClient.getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -202,26 +210,34 @@ void 
testKafkaQuotasPluginWithBandwidthLimitation() { KafkaUserTemplates.scramShaUser(testStorage).build() ); - KafkaClients clients = ClientUtils.getInstantPlainClientBuilder(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + final KafkaProducerClient kafkaProducerClient = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) .withMessageCount(100) .withMessage(String.join("", Collections.nCopies(2000, "#"))) .build(); LOGGER.info("Sending messages with normal user, quota applies"); long startTimeNormal = System.currentTimeMillis(); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi()); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + + KubeResourceManager.get().createResourceWithWait(kafkaProducerClient.getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); + long endTimeNormal = System.currentTimeMillis(); long durationNormal = endTimeNormal - startTimeNormal; LOGGER.info("Time taken for normal user: {} ms", durationNormal); // Measure time for excluded user LOGGER.info("Sending messages with excluded user, no quota applies"); - clients = ClientUtils.getInstantScramShaClientBuilder(testStorage, KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9095").build(); + kafkaProducerClient.setBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9095"); + kafkaProducerClient.setAuthentication(ClientsAuthentication.configurePlainScramSha(testStorage.getNamespaceName(), testStorage.getUsername())); long startTimeExcluded = System.currentTimeMillis(); - KubeResourceManager.get().createResourceWithWait(clients.producerScramShaPlainStrimzi()); - 
ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerClient.getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); + long endTimeExcluded = System.currentTimeMillis(); long durationExcluded = endTimeExcluded - startTimeExcluded; LOGGER.info("Time taken for excluded user: {} ms", durationExcluded); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java index 6f5b21ef7fc..860bcd8a29d 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/TieredStorageST.java @@ -21,7 +21,6 @@ import io.strimzi.systemtest.annotations.MicroShiftNotSupported; import io.strimzi.systemtest.annotations.ParallelTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.kafkaclients.internalClients.admin.AdminClient; import io.strimzi.systemtest.resources.imageBuild.ImageBuild; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -30,7 +29,6 @@ import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates; import io.strimzi.systemtest.templates.crd.KafkaTemplates; import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; -import io.strimzi.systemtest.templates.specific.AdminClientTemplates; import io.strimzi.systemtest.utils.AdminClientUtils; import io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils; @@ -40,6 +38,10 @@ import io.strimzi.systemtest.utils.specific.SeaweedFSUtils; import io.strimzi.test.ReadWriteUtils; import io.strimzi.test.TestUtils; +import io.strimzi.testclients.clients.kafka.KafkaAdminClient; +import 
io.strimzi.testclients.clients.kafka.KafkaAdminClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.kafka.common.requests.ListOffsetsRequest; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -165,27 +167,33 @@ void testTieredStorageWithAivenS3Plugin() { .endSpec() .build()); - final KafkaClients clients = ClientUtils.getInstantPlainClientBuilder(testStorage) + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) .withMessageCount(MESSAGE_COUNT) .withDelayMs(1) .withMessage(String.join("", Collections.nCopies(300, "#"))) .build(); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); SeaweedFSUtils.waitForDataInSeaweedFS(suiteStorage.getNamespaceName(), BUCKET_NAME); + final KafkaAdminClient kafkaAdminClient = new KafkaAdminClientBuilder() + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withName(testStorage.getAdminName()) + .withNamespaceName(testStorage.getNamespaceName()) + .build(); + // Create admin-client to check offsets - KubeResourceManager.get().createResourceWithWait( - AdminClientTemplates.plainAdminClient( - testStorage.getNamespaceName(), - testStorage.getAdminName(), - KafkaResources.plainBootstrapAddress(testStorage.getClusterName()) - ).build() - ); + KubeResourceManager.get().createResourceWithWait(kafkaAdminClient.getDeployment()); 
waitForEarliestLocalOffsetGreaterThanZero(testStorage.getNamespaceName(), testStorage.getAdminName(), testStorage.getTopicName()); - KubeResourceManager.get().createResourceWithWait(clients.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); // Verify we can consume messages from (a) remote storage and (b) local storage. Because we have verified earlier // that the log segments are moved to remote storage (by SeaweedFS size check) and deleted locally (by earliest-local offset check), // we can verify (a) and (b) by checking if we can consume all messages successfully. @@ -288,29 +296,35 @@ void testTieredStorageWithAivenFileSystemPlugin() { .endSpec() .build()); - final KafkaClients clients = ClientUtils.getInstantPlainClientBuilder(testStorage) + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) .withMessageCount(MESSAGE_COUNT) .withDelayMs(1) .withMessage(String.join("", Collections.nCopies(300, "#"))) .build(); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); // wait for logs uploaded to NFS NfsUtils.waitForSizeInNfs(testStorage.getNamespaceName(), size -> size > SEGMENT_BYTE); // Create admin-client to check offsets - KubeResourceManager.get().createResourceWithWait( - AdminClientTemplates.plainAdminClient( - testStorage.getNamespaceName(), - testStorage.getAdminName(), - KafkaResources.plainBootstrapAddress(testStorage.getClusterName()) - ).build() - ); + final KafkaAdminClient 
kafkaAdminClient = new KafkaAdminClientBuilder() + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withName(testStorage.getAdminName()) + .withNamespaceName(testStorage.getNamespaceName()) + .build(); + // Create admin-client to check offsets + KubeResourceManager.get().createResourceWithWait(kafkaAdminClient.getDeployment()); waitForEarliestLocalOffsetGreaterThanZero(testStorage.getNamespaceName(), testStorage.getAdminName(), testStorage.getTopicName()); - KubeResourceManager.get().createResourceWithWait(clients.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), MESSAGE_COUNT); // Delete data diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java index b0ba4fc9c51..e8ef3e58c6e 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/ListenersST.java @@ -36,9 +36,8 @@ import io.strimzi.systemtest.annotations.OpenShiftOnly; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; import io.strimzi.systemtest.docs.TestDocsLabels; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.crd.KafkaComponents; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -56,6 +55,8 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import 
io.strimzi.systemtest.utils.kubeUtils.objects.SecretUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.ServiceUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import io.vertx.core.json.JsonArray; import org.apache.kafka.common.config.SslClientAuth; import org.apache.kafka.common.config.SslConfigs; @@ -142,9 +143,22 @@ void testSendMessagesPlainAnonymous() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage).build()); LOGGER.info("Transmitting messages over plain transport and without auth.Bootstrap address: {}", KafkaResources.plainBootstrapAddress(testStorage.getClusterName())); - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); Service kafkaService = 
KubeResourceManager.get().kubeClient().getClient().services().inNamespace(testStorage.getNamespaceName()).withName(KafkaResources.bootstrapServiceName(testStorage.getClusterName())).get(); String kafkaServiceDiscoveryAnnotation = kafkaService.getMetadata().getAnnotations().get("strimzi.io/discovery"); @@ -206,21 +220,23 @@ void testSendMessagesTlsAuthenticated() { KafkaUserTemplates.tlsUser(testStorage).build() ); - KafkaClients kafkaClients = new KafkaClientsBuilder() + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() .withProducerName(testStorage.getProducerName()) .withConsumerName(testStorage.getConsumerName()) .withNamespaceName(testStorage.getNamespaceName()) - .withMessageCount(testStorage.getMessageCount()) - .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) - .withUsername(testStorage.getUsername()) .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) .build(); KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), - kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); Service kafkaService = KubeResourceManager.get().kubeClient().getClient().services().inNamespace(testStorage.getNamespaceName()).withName(KafkaResources.bootstrapServiceName(testStorage.getClusterName())).get(); String 
kafkaServiceDiscoveryAnnotation = kafkaService.getMetadata().getAnnotations().get("strimzi.io/discovery"); @@ -276,9 +292,23 @@ void testSendMessagesPlainScramSha() { final String boostrapAddress = KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9095"; LOGGER.info("Transmitting messages over plain transport using scram sha auth with bootstrap address: {}", boostrapAddress); - final KafkaClients kafkaClients = ClientUtils.getInstantScramShaClients(testStorage, boostrapAddress); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerScramShaPlainStrimzi(), kafkaClients.consumerScramShaPlainStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(boostrapAddress) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configurePlainScramSha(testStorage.getNamespaceName(), testStorage.getUsername())) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); Service kafkaService = KubeResourceManager.get().kubeClient().getClient().services().inNamespace(testStorage.getNamespaceName()).withName(KafkaResources.bootstrapServiceName(testStorage.getClusterName())).get(); String kafkaServiceDiscoveryAnnotation = kafkaService.getMetadata().getAnnotations().get("strimzi.io/discovery"); @@ -347,12 +377,24 @@ void testSendMessagesTlsScramSha() { final 
String boostrapAddress = KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9096"; LOGGER.info("Transmitting messages over tls using scram sha auth with bootstrap address: {}", boostrapAddress); - KafkaClients kafkaClients = ClientUtils.getInstantScramShaClients(testStorage, boostrapAddress); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(boostrapAddress) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getUsername(), testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerScramShaTlsStrimzi(testStorage.getClusterName()), - kafkaClients.consumerScramShaTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); + LOGGER.info("Checking if generated password has {} characters", passwordLength); String password = KubeResourceManager.get().kubeClient().getClient().secrets().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getUsername()).get().getData().get("password"); @@ -416,12 +458,23 @@ void testSendMessagesCustomListenerTlsScramSha() { final String boostrapAddress = KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9122"; LOGGER.info("Transmitting messages over tls using scram sha auth with bootstrap address: {}", 
boostrapAddress); - final KafkaClients kafkaClients = ClientUtils.getInstantScramShaClients(testStorage, boostrapAddress); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(boostrapAddress) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getUsername(), testStorage.getClusterName())) + .build(); + KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerScramShaTlsStrimzi(testStorage.getClusterName()), - kafkaClients.consumerScramShaTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } /** @@ -550,47 +603,53 @@ void testSendMessagesCustomListenerTlsCustomization() { final String boostrapAddress = KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9122"; LOGGER.info("Transmitting messages over tls using tls auth with bootstrap address: {}", boostrapAddress); - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClients(testStorage, boostrapAddress); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + 
.withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(boostrapAddress) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), customUserCertName1)) + .build(); // ########################################################### // Check that KafkaUser with ACLs can produce/consume to Kafka // Use user certs signed by custom root CA 1 // ########################################################### - kafkaClients.setUsername(customUserCertName1); KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), - kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); - JobUtils.deleteJobsWithWait(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getConsumerName()); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // ########################################################### // Check that KafkaUser with ACLs cannot produce/consume to Kafka // due to wrong user certs signed by Strimzi CA // ########################################################### - kafkaClients.setUsername(testStorage.getUsername()); + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())); KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), - kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); // TODO - we should rework this to allow timeout specification as this is not efficient - 
ClientUtils.waitForInstantClientsTimeout(testStorage); + ClientUtils.waitForClientsTimeout(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); JobUtils.deleteJobsWithWait(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getConsumerName()); // ########################################################### // Check that KafkaUser with superuser right can produce/consume to Kafka // Use user certs signed by custom root CA 2 // ########################################################### - kafkaClients.setUsername(customUserCertName2); + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), customUserCertName2)); KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), - kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); - JobUtils.deleteJobsWithWait(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getConsumerName()); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // ########################################################### // Remove ssl.principal.mapping.rules config from Kafka listener @@ -622,12 +681,12 @@ void testSendMessagesCustomListenerTlsCustomization() { RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getBrokerSelector(), 3, kafkaSnapshot); // Check that KafkaUser with ACL rights cannot produce/consume to/from Kafka - kafkaClients.setUsername(testStorage.getUsername()); + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())); 
KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), - kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientsTimeout(testStorage); + ClientUtils.waitForClientsTimeout(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -1032,9 +1091,22 @@ void testClusterIp() { .build()); final String clusterIPBoostrapAddress = KafkaUtils.bootstrapAddressFromStatus(testStorage.getNamespaceName(), testStorage.getClusterName(), TestConstants.CLUSTER_IP_LISTENER_DEFAULT_NAME); - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage, clusterIPBoostrapAddress); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(clusterIPBoostrapAddress) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -1076,12 +1148,23 @@ void testClusterIpTls() { 
KubeResourceManager.get().createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage).build()); final String clusterIPBoostrapAddress = KafkaUtils.bootstrapAddressFromStatus(testStorage.getNamespaceName(), testStorage.getClusterName(), TestConstants.CLUSTER_IP_LISTENER_DEFAULT_NAME); - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClients(testStorage, clusterIPBoostrapAddress); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(clusterIPBoostrapAddress) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), - kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } // ########################################## @@ -1170,20 +1253,24 @@ void testCustomSoloCertificatesForNodePort() { externalKafkaClient.receiveMessagesTls() ); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9104") + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + 
.withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) .withConsumerGroup("consumer-group-certs-1") - .withCaCertSecretName(clusterCustomCertServer1) + .withBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9104") + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(clusterCustomCertServer1, testStorage.getUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withMessageCount(2 * testStorage.getMessageCount()) - .build(); + kafkaProducerConsumer.setMessageCount(2 * testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -1276,21 +1363,24 @@ void testCustomChainCertificatesForNodePort() { externalKafkaClient.receiveMessagesTls() ); - KafkaClients kafkaClients = ClientUtils.getInstantPlainClientBuilder(testStorage, KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9106") - .withUsername(testStorage.getUsername()) - .withCaCertSecretName(clusterCustomCertChain1) + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + 
.withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) .withConsumerGroup("consumer-group-certs-2") + .withBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9106") + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(clusterCustomCertChain1, testStorage.getUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withMessageCount(2 * testStorage.getMessageCount()) - .build(); + kafkaProducerConsumer.setMessageCount(2 * testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -1375,20 +1465,24 @@ void testCustomSoloCertificatesForLoadBalancer() { externalKafkaClient.receiveMessagesTls() ); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9107") + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + 
.withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) .withConsumerGroup("consumer-group-certs-3") - .withCaCertSecretName(clusterCustomCertServer1) + .withBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9107") + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(clusterCustomCertServer1, testStorage.getUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withMessageCount(2 * testStorage.getMessageCount()) - .build(); + kafkaProducerConsumer.setMessageCount(2 * testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -1483,20 +1577,24 @@ void testCustomChainCertificatesForLoadBalancer() { externalKafkaClient.receiveMessagesTls() ); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9109") + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + 
.withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) .withConsumerGroup("consumer-group-certs-4") - .withCaCertSecretName(clusterCustomCertChain1) + .withBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9109") + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(clusterCustomCertChain1, testStorage.getUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withMessageCount(2 * testStorage.getMessageCount()) - .build(); + kafkaProducerConsumer.setMessageCount(2 * testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -1585,20 +1683,24 @@ void testCustomSoloCertificatesForRoute() { externalKafkaClient.receiveMessagesTls() ); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9111") + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + 
.withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) .withConsumerGroup("consumer-group-certs-5") - .withCaCertSecretName(clusterCustomCertServer1) + .withBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9111") + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(clusterCustomCertServer1, testStorage.getUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withMessageCount(2 * testStorage.getMessageCount()) - .build(); + kafkaProducerConsumer.setMessageCount(2 * testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -1692,20 +1794,24 @@ void testCustomChainCertificatesForRoute() { externalKafkaClient.receiveMessagesTls() ); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9112") + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + 
.withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) .withConsumerGroup("consumer-group-certs-6") - .withCaCertSecretName(clusterCustomCertChain1) + .withBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9112") + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(clusterCustomCertChain1, testStorage.getUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withMessageCount(2 * testStorage.getMessageCount()) - .build(); + kafkaProducerConsumer.setMessageCount(2 * testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -1852,20 +1958,24 @@ void testCustomCertLoadBalancerAndTlsRollingUpdate() { externalKafkaClient.receiveMessagesTls() ); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9113") + KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + 
.withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) .withConsumerGroup("consumer-group-certs-6") - .withCaCertSecretName(clusterCustomCertServer2) + .withBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9113") + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(clusterCustomCertServer2, testStorage.getUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withMessageCount(3 * testStorage.getMessageCount()) - .build(); + kafkaProducerConsumer.setMessageCount(3 * testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount() * 3); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), 3 * testStorage.getMessageCount()); externalCerts = getKafkaStatusCertificates(testStorage.getNamespaceName(), TestConstants.EXTERNAL_LISTENER_DEFAULT_NAME, testStorage.getClusterName()); externalSecretCerts = getKafkaSecretCertificates(testStorage.getNamespaceName(), clusterCustomCertServer1, "ca.crt"); @@ -1887,19 +1997,19 @@ void testCustomCertLoadBalancerAndTlsRollingUpdate() { 
externalKafkaClient.receiveMessagesTls() ); - kafkaClients = new KafkaClientsBuilder(kafkaClients) + kafkaProducerConsumer = new KafkaProducerConsumerBuilder(kafkaProducerConsumer) .withConsumerGroup("consumer-group-certs-71") .withMessageCount(testStorage.getMessageCount()) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) + kafkaProducerConsumer = new KafkaProducerConsumerBuilder(kafkaProducerConsumer) .withMessageCount(testStorage.getMessageCount() * 5) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount() * 5); KafkaUtils.replace(testStorage.getNamespaceName(), testStorage.getClusterName(), kafka -> { @@ -1958,12 +2068,12 @@ void testCustomCertLoadBalancerAndTlsRollingUpdate() { externalKafkaClient.receiveMessagesTls() ); - kafkaClients = new KafkaClientsBuilder(kafkaClients) + kafkaProducerConsumer = new KafkaProducerConsumerBuilder(kafkaProducerConsumer) .withConsumerGroup("consumer-group-certs-83") .withMessageCount(testStorage.getMessageCount() * 6) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), 
testStorage.getConsumerName(), testStorage.getMessageCount() * 6); } @@ -2111,21 +2221,25 @@ void testCustomCertNodePortAndTlsRollingUpdate() { int expectedMessageCountForNewGroup = testStorage.getMessageCount() * 3; - KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9115") + KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) .withConsumerGroup("consumer-group-certs-71") - .withCaCertSecretName(clusterCustomCertServer2) + .withBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9115") + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(clusterCustomCertServer2, testStorage.getUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); int expectedMessageCountForExternalClient = testStorage.getMessageCount(); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withMessageCount(expectedMessageCountForNewGroup) - .build(); + kafkaProducerConsumer.setMessageCount(expectedMessageCountForExternalClient); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), 
testStorage.getConsumerName(), testStorage.getMessageCount() * 3); SecretUtils.updateCustomCertSecret(testStorage.getNamespaceName(), testStorage.getClusterName(), clusterCustomCertServer1, strimziCertAndKey2); @@ -2150,22 +2264,22 @@ void testCustomCertNodePortAndTlsRollingUpdate() { externalKafkaClient.receiveMessagesTls() ); - kafkaClients = new KafkaClientsBuilder(kafkaClients) + kafkaProducerConsumer = new KafkaProducerConsumerBuilder(kafkaProducerConsumer) .withConsumerGroup("consumer-group-certs-72") .withMessageCount(testStorage.getMessageCount()) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); expectedMessageCountForNewGroup += testStorage.getMessageCount(); - kafkaClients = new KafkaClientsBuilder(kafkaClients) + kafkaProducerConsumer = new KafkaProducerConsumerBuilder(kafkaProducerConsumer) .withMessageCount(expectedMessageCountForNewGroup) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); KafkaUtils.replace(testStorage.getNamespaceName(), testStorage.getClusterName(), kafka -> { kafka.getSpec().getKafka().setListeners(asList( @@ -2215,13 +2329,13 @@ void testCustomCertNodePortAndTlsRollingUpdate() { externalKafkaClient.receiveMessagesTls() ); - kafkaClients = new KafkaClientsBuilder(kafkaClients) + kafkaProducerConsumer = new 
KafkaProducerConsumerBuilder(kafkaProducerConsumer) .withConsumerGroup("consumer-group-certs-73") .withMessageCount(expectedMessageCountForNewGroup) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -2371,20 +2485,24 @@ void testCustomCertRouteAndTlsRollingUpdate() { externalKafkaClient.receiveMessagesTls() ); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9117") - .withCaCertSecretName(clusterCustomCertServer2) + KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) .withConsumerGroup("consumer-group-certs-91") + .withBootstrapAddress(KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9117") + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(clusterCustomCertServer2, testStorage.getUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - 
.withMessageCount(testStorage.getMessageCount() * 3) - .build(); + kafkaProducerConsumer.setMessageCount(3 * testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); // Delete already existing secrets SecretUtils.deleteSecretWithWait(testStorage.getNamespaceName(), clusterCustomCertServer1); @@ -2416,20 +2534,20 @@ void testCustomCertRouteAndTlsRollingUpdate() { externalKafkaClient.receiveMessagesTls() ); - kafkaClients = new KafkaClientsBuilder(kafkaClients) + kafkaProducerConsumer = new KafkaProducerConsumerBuilder(kafkaProducerConsumer) .withConsumerGroup("consumer-group-certs-92") .withMessageCount(testStorage.getMessageCount()) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) + kafkaProducerConsumer = new KafkaProducerConsumerBuilder(kafkaProducerConsumer) .withMessageCount(testStorage.getMessageCount() * 5) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), 
testStorage.getConsumerName(), testStorage.getMessageCount()); KafkaUtils.replace(testStorage.getNamespaceName(), testStorage.getClusterName(), kafka -> { kafka.getSpec().getKafka().setListeners(asList( @@ -2491,13 +2609,13 @@ void testCustomCertRouteAndTlsRollingUpdate() { externalKafkaClient.receiveMessagesTls() ); - kafkaClients = new KafkaClientsBuilder(kafkaClients) + kafkaProducerConsumer = new KafkaProducerConsumerBuilder(kafkaProducerConsumer) .withMessageCount(testStorage.getMessageCount() * 6) .withConsumerGroup("consumer-group-certs-93") .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -2724,12 +2842,23 @@ void testMessagesTlsScramShaWithPredefinedPassword() { final String boostrapAddress = KafkaResources.bootstrapServiceName(testStorage.getClusterName()) + ":9096"; LOGGER.info("Transmitting messages over tls using scram sha auth with bootstrap address: {} with predefined password", boostrapAddress); - KafkaClients kafkaClients = ClientUtils.getInstantScramShaClients(testStorage, boostrapAddress); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(boostrapAddress) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getUsername(), 
testStorage.getClusterName())) + .build(); + KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerScramShaTlsStrimzi(testStorage.getClusterName()), - kafkaClients.consumerScramShaTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Changing password in Secret: {}/{}, we should be able to send/receive messages", testStorage.getNamespaceName(), secretName); @@ -2742,13 +2871,16 @@ void testMessagesTlsScramShaWithPredefinedPassword() { LOGGER.info("Receiving messages with new password"); - kafkaClients.generateNewConsumerGroup(); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + // we need to take the updated password from the Secret again + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getUsername(), testStorage.getClusterName())); KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerScramShaTlsStrimzi(testStorage.getClusterName()), - kafkaClients.consumerScramShaTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @Tag(NODEPORT_SUPPORTED) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java index 68f7ab729f8..9e182f9cb4a 100644 --- 
a/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/kafka/listeners/MultipleListenersST.java @@ -21,9 +21,8 @@ import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.OpenShiftOnly; import io.strimzi.systemtest.docs.TestDocsLabels; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates; @@ -33,6 +32,8 @@ import io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaUserUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -307,28 +308,23 @@ private void runListenersTest(List listeners, String clust } } else { // using internal clients - KafkaClients kafkaClients = new KafkaClientsBuilder() + KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() .withTopicName(topicName) .withMessageCount(testStorage.getMessageCount()) .withProducerName(producerName) .withConsumerName(consumerName) - .withUsername(kafkaUsername) .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) .withBootstrapAddress(KafkaResources.bootstrapServiceName(clusterName) + ":" + listener.getPort()) .build(); if (isTlsEnabled) { - // verify phase - 
KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerTlsStrimzi(clusterName), - kafkaClients.consumerTlsStrimzi(clusterName) - ); - } else { - KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerStrimzi(), - kafkaClients.consumerStrimzi() - ); + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTls(clusterName, kafkaUsername)); } + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); ClientUtils.waitForClientsSuccess(Environment.TEST_SUITE_NAMESPACE, consumerName, producerName, testStorage.getMessageCount()); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/metrics/MetricsST.java b/systemtest/src/test/java/io/strimzi/systemtest/metrics/MetricsST.java index ca44f03d728..972931c1330 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/metrics/MetricsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/metrics/MetricsST.java @@ -11,6 +11,7 @@ import io.fabric8.kubernetes.api.model.ConfigMapKeySelector; import io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder; import io.fabric8.kubernetes.api.model.LabelSelector; +import io.fabric8.kubernetes.api.model.LabelSelectorBuilder; import io.skodjob.annotations.Desc; import io.skodjob.annotations.Label; import io.skodjob.annotations.Step; @@ -37,10 +38,6 @@ import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.ParallelTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClients; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClientsBuilder; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; import io.strimzi.systemtest.labels.LabelSelectors; import 
io.strimzi.systemtest.performance.gather.collectors.BaseMetricsCollector; import io.strimzi.systemtest.resources.crd.KafkaComponents; @@ -65,6 +62,10 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.systemtest.utils.specific.CruiseControlUtils; +import io.strimzi.testclients.clients.http.HttpProducerConsumer; +import io.strimzi.testclients.clients.http.HttpProducerConsumerBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -248,16 +249,20 @@ void testKafkaExporterMetrics() { final String kafkaStrimziPodSetName = KafkaComponents.getBrokerPodSetName(kafkaClusterFirstName); final LabelSelector brokerPodsSelector = LabelSelectors.kafkaLabelSelector(kafkaClusterFirstName, kafkaStrimziPodSetName); - KafkaClients kafkaClients = new KafkaClientsBuilder() + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(namespaceFirst) .withTopicName(kafkaExporterTopicName) .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterFirstName)) - .withNamespaceName(namespaceFirst) .withMessageCount(5000) - .withProducerName(testStorage.getProducerName()) - .withConsumerName(testStorage.getConsumerName()) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); ClientUtils.waitForClientsSuccess(namespaceFirst, 
testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount(), false); assertMetricValueNotNull(kafkaExporterCollector, "kafka_consumergroup_current_offset\\{.*\\}"); @@ -469,22 +474,25 @@ void testKafkaBridgeMetrics() { .withComponent(KafkaBridgeMetricsComponent.create(namespaceFirst, bridgeClusterName)) .build(); + NetworkPolicyUtils.allowNetworkPolicySettingsForBridgeClients(testStorage.getNamespaceName(), testStorage.getProducerName(), new LabelSelectorBuilder().addToMatchLabels("app", testStorage.getProducerName()).build(), KafkaBridgeResources.componentName(bridgeClusterName)); + NetworkPolicyUtils.allowNetworkPolicySettingsForBridgeClients(testStorage.getNamespaceName(), testStorage.getConsumerName(), new LabelSelectorBuilder().addToMatchLabels("app", testStorage.getConsumerName()).build(), KafkaBridgeResources.componentName(bridgeClusterName)); // Attach consumer before producer - BridgeClients kafkaBridgeClientJob = new BridgeClientsBuilder() + HttpProducerConsumer httpProducerConsumer = new HttpProducerConsumerBuilder() .withNamespaceName(namespaceFirst) .withProducerName(testStorage.getProducerName()) .withConsumerName(testStorage.getConsumerName()) - .withBootstrapAddress(KafkaBridgeResources.serviceName(bridgeClusterName)) - .withComponentName(KafkaBridgeResources.componentName(bridgeClusterName)) + .withHostname(KafkaBridgeResources.serviceName(bridgeClusterName)) .withTopicName(bridgeTopicName) .withMessageCount(testStorage.getMessageCount()) .withPort(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) .withDelayMs(200) - .withPollInterval(200) .build(); // we cannot wait for producer and consumer to complete to see all needed metrics - especially `strimzi_bridge_kafka_producer_count` - KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJob.producerStrimziBridge(), kafkaBridgeClientJob.consumerStrimziBridge()); + KubeResourceManager.get().createResourceWithWait( + httpProducerConsumer.getProducer().getJob(), + 
httpProducerConsumer.getConsumer().getJob() + ); bridgeCollector.collectMetricsFromPods(TestConstants.METRICS_COLLECT_TIMEOUT); assertMetricValueNotNull(bridgeCollector, "strimzi_bridge_kafka_producer_count\\{.*}"); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/metrics/StrimziMetricsReporterST.java b/systemtest/src/test/java/io/strimzi/systemtest/metrics/StrimziMetricsReporterST.java index 7fcf972e526..a0b8d2ee572 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/metrics/StrimziMetricsReporterST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/metrics/StrimziMetricsReporterST.java @@ -20,8 +20,6 @@ import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.ParallelTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClients; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClientsBuilder; import io.strimzi.systemtest.labels.LabelSelectors; import io.strimzi.systemtest.performance.gather.collectors.BaseMetricsCollector; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -39,6 +37,8 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.systemtest.utils.specific.MetricsUtils; +import io.strimzi.testclients.clients.http.HttpProducerConsumer; +import io.strimzi.testclients.clients.http.HttpProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -254,22 +254,22 @@ void testKafkaBridgeMetrics() { NetworkPolicyUtils.allowNetworkPolicySettingsForBridgeScraper(testStorage.getNamespaceName(), testStorage.getScraperName(), KafkaBridgeResources.componentName(BRIDGE_NAME)); - BridgeClients kafkaBridgeClientJob = new BridgeClientsBuilder() + // Attach consumer before producer + HttpProducerConsumer 
httpProducerConsumer = new HttpProducerConsumerBuilder() .withNamespaceName(testStorage.getNamespaceName()) .withProducerName(testStorage.getProducerName()) .withConsumerName(testStorage.getConsumerName()) - .withBootstrapAddress(KafkaBridgeResources.serviceName(BRIDGE_NAME)) - .withComponentName(KafkaBridgeResources.componentName(BRIDGE_NAME)) + .withHostname(KafkaBridgeResources.serviceName(BRIDGE_NAME)) .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) .withPort(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) .withDelayMs(200) - .withPollInterval(200) .build(); - KubeResourceManager.get().createResourceWithoutWait( - kafkaBridgeClientJob.producerStrimziBridge(), - kafkaBridgeClientJob.consumerStrimziBridge() + // we cannot wait for producer and consumer to complete to see all needed metrics - especially `strimzi_bridge_kafka_producer_count` + KubeResourceManager.get().createResourceWithWait( + httpProducerConsumer.getProducer().getJob(), + httpProducerConsumer.getConsumer().getJob() ); BaseMetricsCollector bridgeCollector = kafkaCollector.toBuilder() diff --git a/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java b/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java index d82a8f45acd..2b177dd26fd 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java @@ -31,8 +31,7 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.kafkaclients.internalClients.admin.AdminClient; import 
io.strimzi.systemtest.kafkaclients.internalClients.admin.KafkaTopicDescription; import io.strimzi.systemtest.labels.LabelSelectors; @@ -46,7 +45,6 @@ import io.strimzi.systemtest.templates.crd.KafkaTemplates; import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; import io.strimzi.systemtest.templates.crd.KafkaUserTemplates; -import io.strimzi.systemtest.templates.specific.AdminClientTemplates; import io.strimzi.systemtest.templates.specific.ScraperTemplates; import io.strimzi.systemtest.utils.AdminClientUtils; import io.strimzi.systemtest.utils.ClientUtils; @@ -66,6 +64,10 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.SecretUtils; import io.strimzi.test.TestUtils; +import io.strimzi.testclients.clients.kafka.KafkaAdminClient; +import io.strimzi.testclients.clients.kafka.KafkaAdminClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -165,9 +167,22 @@ void testMirrorMaker2() { LOGGER.info("Messages exchange - Topic: {}, cluster: {} and message count of {}", testStorage.getTopicName(), testStorage.getSourceClusterName(), testStorage.getMessageCount()); - final KafkaClients sourceClients = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getSourceClusterName())); - KubeResourceManager.get().createResourceWithWait(sourceClients.producerStrimzi(), sourceClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer sourceKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + 
.withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getSourceClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + sourceKafkaProducerConsumer.getProducer().getJob(), + sourceKafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Verifying configurations in config map"); ConfigMap configMap = KubeResourceManager.get().kubeClient().getClient().configMaps().inNamespace(testStorage.getNamespaceName()).withName(KafkaMirrorMaker2Resources.configMapName(testStorage.getClusterName())).get(); @@ -184,13 +199,23 @@ void testMirrorMaker2() { LOGGER.info("Now setting Topic to {} and cluster to {} - the messages should be mirrored", testStorage.getMirroredSourceTopicName(), testStorage.getTargetClusterName()); - final KafkaClients targetClients = ClientUtils.getInstantPlainClientBuilder(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())) + final KafkaProducerConsumer targetKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) .withTopicName(testStorage.getMirroredSourceTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())) + .withMessageCount(testStorage.getMessageCount()) .build(); + KubeResourceManager.get().createResourceWithWait( + targetKafkaProducerConsumer.getProducer().getJob(), + targetKafkaProducerConsumer.getConsumer().getJob() + ); + LOGGER.info("Consumer in target cluster and Topic 
should consume {} messages", testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(targetClients.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Mirrored successful"); @@ -201,13 +226,14 @@ void testMirrorMaker2() { RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getMM2Selector(), mirrorMakerReplicasCount, mm2PodsSnapshot); - KubeResourceManager.get().createResourceWithWait( - AdminClientTemplates.plainAdminClient( - testStorage.getNamespaceName(), - testStorage.getAdminName(), - KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName()) - ).build() - ); + final KafkaAdminClient kafkaAdminClient = new KafkaAdminClientBuilder() + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())) + .withName(testStorage.getAdminName()) + .withNamespaceName(testStorage.getNamespaceName()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaAdminClient.getDeployment()); + final AdminClient targetClusterAdminClient = AdminClientUtils.getConfiguredAdminClient(testStorage.getNamespaceName(), testStorage.getAdminName()); LOGGER.info("Verifying topic {} has expected partitions: {}", testStorage.getMirroredSourceTopicName(), 3); @@ -301,11 +327,23 @@ void testMirrorMaker2TlsAndTlsClientAuth() { // Check brokers availability LOGGER.info("Messages exchange - Topic: {}, cluster: {}", testStorage.getSourceClusterName(), testStorage.getMessageCount()); - final KafkaClients sourceClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(testStorage.getSourceClusterName())) - .withUsername(testStorage.getSourceUsername()) + final KafkaProducerConsumer sourceKafkaProducerConsumer = new 
KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getSourceClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getSourceClusterName(), testStorage.getSourceUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(sourceClients.producerTlsStrimzi(testStorage.getSourceClusterName()), sourceClients.consumerTlsStrimzi(testStorage.getSourceClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + + KubeResourceManager.get().createResourceWithWait( + sourceKafkaProducerConsumer.getProducer().getJob(), + sourceKafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); KubeResourceManager.get().createResourceWithWait(KafkaMirrorMaker2Templates.kafkaMirrorMaker2(testStorage, 1, true) .editSpec() @@ -339,25 +377,35 @@ void testMirrorMaker2TlsAndTlsClientAuth() { .build()); LOGGER.info("Consuming from mirrored Topic: {}, cluster: {}, user: {}", testStorage.getMirroredSourceTopicName(), testStorage.getTargetClusterName(), testStorage.getTargetClusterName()); - final KafkaClients targetClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName())) + final KafkaProducerConsumer targetKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) 
.withTopicName(testStorage.getMirroredSourceTopicName()) - .withUsername(testStorage.getTargetUsername()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getTargetClusterName(), testStorage.getTargetUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(targetClients.consumerTlsStrimzi(testStorage.getTargetClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + + KubeResourceManager.get().createResourceWithWait( + targetKafkaProducerConsumer.getProducer().getJob(), + targetKafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Checking topic is mirrored correctly in target cluster"); // Deploy kafka admin on communicating with target kafka cluster. 
+ final KafkaAdminClient kafkaAdminClient = new KafkaAdminClientBuilder() + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName())) + .withName(testStorage.getAdminName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getTargetClusterName(), testStorage.getTargetUsername())) + .build(); - KubeResourceManager.get().createResourceWithWait( - AdminClientTemplates.tlsAdminClient( - testStorage.getNamespaceName(), - testStorage.getTargetUsername(), - testStorage.getAdminName(), - testStorage.getTargetClusterName(), - KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName()) - )); + KubeResourceManager.get().createResourceWithWait(kafkaAdminClient.getDeployment()); final AdminClient targetClusterAdminClient = AdminClientUtils.getConfiguredAdminClient(testStorage.getNamespaceName(), testStorage.getAdminName()); @@ -450,11 +498,23 @@ void testMirrorMaker2TlsAndScramSha512Auth() { LOGGER.info("Messages exchange - Topic: {}, cluster: {}", testStorage.getTopicName(), testStorage.getSourceClusterName()); - final KafkaClients sourceClients = ClientUtils.getInstantScramShaClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(testStorage.getSourceClusterName())) - .withUsername(testStorage.getSourceUsername()) + final KafkaProducerConsumer sourceKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getSourceClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getSourceClusterName(), testStorage.getSourceUsername())) .build(); - 
KubeResourceManager.get().createResourceWithWait(sourceClients.producerScramShaTlsStrimzi(testStorage.getSourceClusterName()), sourceClients.consumerScramShaTlsStrimzi(testStorage.getSourceClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + + KubeResourceManager.get().createResourceWithWait( + sourceKafkaProducerConsumer.getProducer().getJob(), + sourceKafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); KubeResourceManager.get().createResourceWithWait(KafkaMirrorMaker2Templates.kafkaMirrorMaker2(testStorage, 1, true) .editSpec() @@ -482,24 +542,36 @@ void testMirrorMaker2TlsAndScramSha512Auth() { .build()); LOGGER.info("Consuming from mirrored Topic: {}, cluster: {}, user: {}", testStorage.getMirroredSourceTopicName(), testStorage.getTargetClusterName(), testStorage.getTargetClusterName()); - final KafkaClients targetClients = ClientUtils.getInstantScramShaClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName())) + final KafkaProducerConsumer targetKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) .withTopicName(testStorage.getMirroredSourceTopicName()) - .withUsername(testStorage.getTargetUsername()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getTargetClusterName(), testStorage.getTargetUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(targetClients.consumerScramShaTlsStrimzi(testStorage.getTargetClusterName())); - 
ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + + KubeResourceManager.get().createResourceWithWait( + targetKafkaProducerConsumer.getProducer().getJob(), + targetKafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Checking topic is mirrored correctly in target cluster"); // deploy admin client - KubeResourceManager.get().createResourceWithWait( - AdminClientTemplates.scramShaOverTlsAdminClient( - testStorage.getNamespaceName(), - testStorage.getTargetUsername(), - testStorage.getAdminName(), - testStorage.getTargetClusterName(), - KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName()) - )); + final KafkaAdminClient kafkaAdminClient = new KafkaAdminClientBuilder() + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName())) + .withName(testStorage.getAdminName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getTargetClusterName(), testStorage.getTargetUsername())) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaAdminClient.getDeployment()); + final AdminClient targetClusterAdminClient = AdminClientUtils.getConfiguredAdminClient(testStorage.getNamespaceName(), testStorage.getAdminName()); AdminClientUtils.waitForTopicPresence(targetClusterAdminClient, testStorage.getMirroredSourceTopicName()); @@ -634,14 +706,28 @@ void testIdentityReplicationPolicy() { .build()); LOGGER.info("Producing and consuming messages via {}", testStorage.getSourceClusterName()); - final KafkaClients sourceClients = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getSourceClusterName())); - KubeResourceManager.get().createResourceWithWait(sourceClients.producerStrimzi(), sourceClients.consumerStrimzi()); - 
ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getSourceClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Consuming mirrored messages via {}", testStorage.getTargetClusterName()); - final KafkaClients targetClients = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())); - KubeResourceManager.get().createResourceWithWait(targetClients.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + kafkaProducerConsumer.setBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())); + + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -724,74 +810,71 @@ void testRestoreOffsetsInConsumerGroup() { KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getSourceClusterName(), 3).build() ); - KafkaClients initialInternalClientSourceJob = new KafkaClientsBuilder() - .withProducerName(sourceProducerName) - 
.withConsumerName(sourceConsumerName) - .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getSourceClusterName())) - .withTopicName(testStorage.getTopicName()) - .withMessageCount(testStorage.getMessageCount()) - .withMessage("Producer A") - .withConsumerGroup(consumerGroup) - .withNamespaceName(testStorage.getNamespaceName()) - .build(); - - KafkaClients initialInternalClientTargetJob = new KafkaClientsBuilder() - .withProducerName(targetProducerName) - .withConsumerName(targetConsumerName) - .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())) - .withTopicName(testStorage.getMirroredSourceTopicName()) - .withMessageCount(testStorage.getMessageCount()) - .withConsumerGroup(consumerGroup) - .withNamespaceName(testStorage.getNamespaceName()) - .build(); + KafkaProducerConsumer sourceKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(sourceProducerName) + .withConsumerName(sourceConsumerName) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getSourceClusterName())) + .withTopicName(testStorage.getTopicName()) + .withMessageCount(testStorage.getMessageCount()) + .withMessage("Producer A") + .withConsumerGroup(consumerGroup) + .withNamespaceName(testStorage.getNamespaceName()) + .build(); + + KafkaProducerConsumer targetKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(targetProducerName) + .withConsumerName(targetConsumerName) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())) + .withTopicName(testStorage.getMirroredSourceTopicName()) + .withMessageCount(testStorage.getMessageCount()) + .withConsumerGroup(consumerGroup) + .withNamespaceName(testStorage.getNamespaceName()) + .build(); LOGGER.info("Produce & consume {} messages to/from Source cluster", testStorage.getMessageCount()); KubeResourceManager.get().createResourceWithWait( - 
initialInternalClientSourceJob.producerStrimzi(), - initialInternalClientSourceJob.consumerStrimzi()); + sourceKafkaProducerConsumer.getProducer().getJob(), + sourceKafkaProducerConsumer.getConsumer().getJob() + ); ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), sourceConsumerName, sourceProducerName, testStorage.getMessageCount()); LOGGER.info("Produce {} messages to Source cluster", testStorage.getMessageCount()); - KafkaClients internalClientSourceJob = new KafkaClientsBuilder(initialInternalClientSourceJob).withMessage("Producer B").build(); + sourceKafkaProducerConsumer.setMessage("Producer B"); - KubeResourceManager.get().createResourceWithWait( - internalClientSourceJob.producerStrimzi()); + KubeResourceManager.get().createResourceWithWait(sourceKafkaProducerConsumer.getProducer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), sourceProducerName, testStorage.getMessageCount()); LOGGER.info("Consume {} messages from mirrored Topic on target cluster", testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait( - initialInternalClientTargetJob.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait(targetKafkaProducerConsumer.getConsumer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), targetConsumerName, testStorage.getMessageCount()); LOGGER.info("Produce 50 messages to Source cluster"); - internalClientSourceJob = new KafkaClientsBuilder(internalClientSourceJob).withMessageCount(50).withMessage("Producer C").build(); - KubeResourceManager.get().createResourceWithWait( - internalClientSourceJob.producerStrimzi()); + sourceKafkaProducerConsumer.setMessageCount(50); + sourceKafkaProducerConsumer.setMessage("Producer C"); + + KubeResourceManager.get().createResourceWithWait(sourceKafkaProducerConsumer.getProducer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), sourceProducerName, 50); LOGGER.info("Consume 10 messages 
from source cluster"); - internalClientSourceJob = new KafkaClientsBuilder(internalClientSourceJob).withMessageCount(10).withAdditionalConfig("max.poll.records=10").build(); - KubeResourceManager.get().createResourceWithWait( - internalClientSourceJob.consumerStrimzi()); + sourceKafkaProducerConsumer.setMessageCount(10); + sourceKafkaProducerConsumer.setAdditionalConfig("max.poll.records=10"); + KubeResourceManager.get().createResourceWithWait(sourceKafkaProducerConsumer.getConsumer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), sourceConsumerName, 10); LOGGER.info("Consume 40 messages from mirrored Topic on target cluster"); - KafkaClients internalClientTargetJob = new KafkaClientsBuilder(initialInternalClientTargetJob).withMessageCount(40).build(); - KubeResourceManager.get().createResourceWithWait( - internalClientTargetJob.consumerStrimzi()); + targetKafkaProducerConsumer.setMessageCount(40); + KubeResourceManager.get().createResourceWithWait(targetKafkaProducerConsumer.getConsumer().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), targetConsumerName, 40); LOGGER.info("There should be no more messages to read. Try to consume at least 1 message. " + "This client Job should fail on timeout."); - KubeResourceManager.get().createResourceWithWait( - initialInternalClientTargetJob.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait(targetKafkaProducerConsumer.getConsumer().getJob()); assertDoesNotThrow(() -> ClientUtils.waitForClientTimeout(testStorage.getNamespaceName(), targetConsumerName, 1)); LOGGER.info("As it's Active-Active MirrorMaker2 mode, there should be no more messages to read from Source cluster" + " topic. 
This client Job should fail on timeout."); - KubeResourceManager.get().createResourceWithWait( - initialInternalClientSourceJob.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait(sourceKafkaProducerConsumer.getConsumer().getJob()); assertDoesNotThrow(() -> ClientUtils.waitForClientTimeout(testStorage.getNamespaceName(), sourceConsumerName, 1)); } @@ -817,9 +900,24 @@ void testKafkaMirrorMaker2ConnectorsStateAndOffsetManagement() throws JsonProces final String errorMessage = "One or more connectors are in FAILED state"; - final KafkaClients sourceKafkaClients = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getSourceClusterName())); - final KafkaClients targetKafkaCLients = ClientUtils.getInstantPlainClientBuilder(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())) + KafkaProducerConsumer sourceKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getSourceClusterName())) + .withTopicName(testStorage.getTopicName()) + .withMessageCount(testStorage.getMessageCount()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) + .build(); + + KafkaProducerConsumer targetKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())) .withTopicName(testStorage.getMirroredSourceTopicName()) + .withMessageCount(testStorage.getMessageCount()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) .build(); LOGGER.info("Deploy Kafka clusters and KafkaMirrorMaker2: {}/{} with wrong 
bootstrap service name configuration", testStorage.getNamespaceName(), testStorage.getClusterName()); @@ -875,11 +973,14 @@ void testKafkaMirrorMaker2ConnectorsStateAndOffsetManagement() throws JsonProces KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getSourceClusterName(), 3).build()); LOGGER.info("Success to produce and consume messages on source Kafka Cluster: {}/{} while connector is stopped", testStorage.getNamespaceName(), testStorage.getSourceClusterName()); - KubeResourceManager.get().createResourceWithWait(sourceKafkaClients.producerStrimzi(), sourceKafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait( + sourceKafkaProducerConsumer.getProducer().getJob(), + sourceKafkaProducerConsumer.getConsumer().getJob() + ); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Fail to consume messages on target Kafka Cluster: {}/{} while connector is stopped", testStorage.getNamespaceName(), testStorage.getSourceClusterName()); - KubeResourceManager.get().createResourceWithWait(targetKafkaCLients.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientTimeout(testStorage); + KubeResourceManager.get().createResourceWithWait(targetKafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientTimeout(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); LOGGER.info("Re-running KafkaMirrorMaker2: {}/{} source connector", testStorage.getNamespaceName(), testStorage.getClusterName()); KafkaMirrorMaker2Utils.replace(testStorage.getNamespaceName(), testStorage.getClusterName(), @@ -887,8 +988,8 @@ void testKafkaMirrorMaker2ConnectorsStateAndOffsetManagement() throws JsonProces ); LOGGER.info("Consumer in target 
cluster and Topic should consume {} messages", testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(targetKafkaCLients.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(targetKafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); KafkaConnectorUtils.waitForOffsetInConnector( testStorage.getNamespaceName(), @@ -1034,19 +1135,40 @@ void testKMM2RollAfterSecretsCertsUpdateScramSha() { .build()); LOGGER.info("Producing and consuming messages using Topic: {}", testStorage.getSourceClusterName()); - final KafkaClients sourceClients = ClientUtils.getInstantScramShaClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(testStorage.getSourceClusterName())) - .withUsername(testStorage.getSourceUsername()) + final KafkaProducerConsumer sourceKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getSourceClusterName())) + .withTopicName(testStorage.getTopicName()) + .withMessageCount(testStorage.getMessageCount()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getSourceUsername(), testStorage.getSourceClusterName())) .build(); - KubeResourceManager.get().createResourceWithWait(sourceClients.producerScramShaTlsStrimzi(testStorage.getSourceClusterName()), sourceClients.consumerScramShaTlsStrimzi(testStorage.getSourceClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + + KubeResourceManager.get().createResourceWithWait( + 
sourceKafkaProducerConsumer.getProducer().getJob(), + sourceKafkaProducerConsumer.getConsumer().getJob() + ); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Consuming mirrored messages using Topic: {}", testStorage.getTargetClusterName()); - final KafkaClients targetClients = ClientUtils.getInstantScramShaClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName())) + final KafkaProducerConsumer targetKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName())) .withTopicName(testStorage.getMirroredSourceTopicName()) - .withUsername(testStorage.getTargetUsername()) + .withMessageCount(testStorage.getMessageCount()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getTargetUsername(), testStorage.getTargetClusterName())) .build(); - KubeResourceManager.get().createResourceWithWait(targetClients.consumerScramShaTlsStrimzi(testStorage.getTargetClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + + KubeResourceManager.get().createResourceWithWait( + targetKafkaProducerConsumer.getProducer().getJob(), + targetKafkaProducerConsumer.getConsumer().getJob() + ); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Messages successfully mirrored"); @@ -1063,12 +1185,16 @@ void testKMM2RollAfterSecretsCertsUpdateScramSha() { 
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), testStorage.getMM2Selector(), 1, mmSnapshot); - //producing to source and consuming from target cluster after rolling update. - KubeResourceManager.get().createResourceWithWait(sourceClients.producerScramShaTlsStrimzi(testStorage.getSourceClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + // because passwords have changed, we need to change the authentication as well (to get new sasl.jaas.config from the users' secrets) + sourceKafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getSourceUsername(), testStorage.getSourceClusterName())); + targetKafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getTargetUsername(), testStorage.getTargetClusterName())); + + // producing to source and consuming from target cluster after rolling update. 
+ KubeResourceManager.get().createResourceWithWait(sourceKafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(targetClients.consumerScramShaTlsStrimzi(testStorage.getTargetClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(targetKafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -1182,22 +1308,38 @@ void testKMM2RollAfterSecretsCertsUpdateTLS() { Map mmSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getMM2Selector()); - final KafkaClients sourceClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(testStorage.getSourceClusterName())) - .withUsername(testStorage.getSourceUsername()) + final KafkaProducerConsumer sourceKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getSourceClusterName())) + .withTopicName(testStorage.getTopicName()) + .withMessageCount(testStorage.getMessageCount()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getSourceClusterName(), testStorage.getSourceUsername())) .build(); - final KafkaClients targetClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName())) + final KafkaProducerConsumer targetKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + 
.withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName())) .withTopicName(testStorage.getMirroredSourceTopicName()) - .withUsername(testStorage.getTargetUsername()) + .withMessageCount(testStorage.getMessageCount()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getTargetClusterName(), testStorage.getTargetUsername())) .build(); LOGGER.info("Producing messages in source cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getSourceClusterName()); - KubeResourceManager.get().createResourceWithWait(sourceClients.producerTlsStrimzi(testStorage.getSourceClusterName()), sourceClients.consumerTlsStrimzi(testStorage.getSourceClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait( + sourceKafkaProducerConsumer.getProducer().getJob(), + sourceKafkaProducerConsumer.getConsumer().getJob() + ); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Consuming messages in target cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getTargetClusterName()); - KubeResourceManager.get().createResourceWithWait(targetClients.consumerTlsStrimzi(testStorage.getTargetClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(targetKafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); LabelSelector controlSourceSelector = LabelSelectors.kafkaLabelSelector(testStorage.getSourceClusterName(), 
KafkaComponents.getControllerPodSetName(testStorage.getSourceClusterName())); LabelSelector brokerSourceSelector = LabelSelectors.kafkaLabelSelector(testStorage.getSourceClusterName(), KafkaComponents.getBrokerPodSetName(testStorage.getSourceClusterName())); @@ -1225,11 +1367,14 @@ void testKMM2RollAfterSecretsCertsUpdateTLS() { mmSnapshot = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), testStorage.getMM2Selector(), 1, mmSnapshot); LOGGER.info("Producing messages in source cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getSourceClusterName()); - KubeResourceManager.get().createResourceWithWait(sourceClients.producerTlsStrimzi(testStorage.getSourceClusterName()), sourceClients.consumerTlsStrimzi(testStorage.getSourceClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait( + sourceKafkaProducerConsumer.getProducer().getJob(), + sourceKafkaProducerConsumer.getConsumer().getJob() + ); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Consuming messages in target cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getTargetClusterName()); - KubeResourceManager.get().createResourceWithWait(targetClients.consumerTlsStrimzi(testStorage.getTargetClusterName())); + KubeResourceManager.get().createResourceWithWait(targetKafkaProducerConsumer.getConsumer().getJob()); // Extend the timeout for clients to be sure that all messages are synced by MM2 JobUtils.waitForJobSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), TestConstants.GLOBAL_TIMEOUT_LONG); JobUtils.deleteJobsWithWait(testStorage.getNamespaceName(), testStorage.getConsumerName()); @@ -1253,11 +1398,14 @@ void testKMM2RollAfterSecretsCertsUpdateTLS() { RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), 
testStorage.getMM2Selector(), 1, mmSnapshot); LOGGER.info("Producing messages in source cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getSourceClusterName()); - KubeResourceManager.get().createResourceWithWait(sourceClients.producerTlsStrimzi(testStorage.getSourceClusterName()), sourceClients.consumerTlsStrimzi(testStorage.getSourceClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait( + sourceKafkaProducerConsumer.getProducer().getJob(), + sourceKafkaProducerConsumer.getConsumer().getJob() + ); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Consuming messages in target cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getTargetClusterName()); - KubeResourceManager.get().createResourceWithWait(targetClients.consumerTlsStrimzi(testStorage.getTargetClusterName())); + KubeResourceManager.get().createResourceWithWait(targetKafkaProducerConsumer.getConsumer().getJob()); // Extend the timeout for clients to be sure that all messages are synced by MM2 JobUtils.waitForJobSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), TestConstants.GLOBAL_TIMEOUT_LONG); JobUtils.deleteJobsWithWait(testStorage.getNamespaceName(), testStorage.getConsumerName()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/FeatureGatesST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/FeatureGatesST.java index 8637cf5604d..64c09e400c0 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/FeatureGatesST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/FeatureGatesST.java @@ -20,7 +20,6 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import 
io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.operator.ClusterOperatorConfigurationBuilder; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; @@ -32,6 +31,8 @@ import io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils; import io.strimzi.test.k8s.KubeClusterResource; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClient; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClientBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.Tag; @@ -170,9 +171,18 @@ void testUseConnectBuildWithBuildah() { .endSpec() .build()); - KafkaClients kafkaClient = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getClusterName())); - KubeResourceManager.get().createResourceWithWait(kafkaClient.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + final KafkaConsumerClient kafkaConsumerClient = new KafkaConsumerClientBuilder() + .withName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaConsumerClient.getJob()); + + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } private static void annotateResourcesAndCheckIfPresent(TestStorage testStorage, boolean shouldBePresent) { diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java 
b/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java index f0ef5a301d2..423789c3446 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java @@ -15,6 +15,7 @@ import io.strimzi.api.kafka.model.connect.KafkaConnect; import io.strimzi.api.kafka.model.connector.KafkaConnector; import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.kafka.cruisecontrol.CruiseControlResources; import io.strimzi.api.kafka.model.rebalance.KafkaRebalance; import io.strimzi.api.kafka.model.rebalance.KafkaRebalanceAnnotation; @@ -25,7 +26,6 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.performance.gather.collectors.BaseMetricsCollector; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.operator.ClusterOperatorConfigurationBuilder; @@ -50,6 +50,8 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.systemtest.utils.specific.MetricsUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerClient; +import io.strimzi.testclients.clients.kafka.KafkaProducerClientBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -198,8 +200,16 @@ void testMultipleCOsInDifferentNamespaces() { .endSpec() .build()); - final KafkaClients basicClients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(basicClients.producerStrimzi()); + final KafkaProducerClient 
kafkaProducerClient = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaProducerClient.getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(testStorage.getNamespaceName(), kafkaConnectPodName, TestConstants.DEFAULT_SINK_FILE_PATH, testStorage.getMessageCount()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/NamespaceDeletionRecoveryST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/NamespaceDeletionRecoveryST.java index 3d478a4dd05..f6e6ad15d80 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/NamespaceDeletionRecoveryST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/NamespaceDeletionRecoveryST.java @@ -26,7 +26,6 @@ import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.cli.KafkaCmdClient; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.crd.KafkaComponents; import io.strimzi.systemtest.resources.operator.ClusterOperatorConfigurationBuilder; @@ -40,6 +39,8 @@ import io.strimzi.systemtest.utils.kubeUtils.NamespaceUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PersistentVolumeClaimUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import 
org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.AfterAll; @@ -133,9 +134,22 @@ void testTopicAvailable() { .endSpec() .build()); - final KafkaClients clients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi(), clients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @IsolatedTest("We need for each test case its own Cluster Operator") @@ -258,9 +272,22 @@ void testTopicNotAvailable() { DeploymentUtils.waitForDeploymentAndPodsReady(testStorage.getNamespaceName(), testStorage.getEoDeploymentName(), 1); - final KafkaClients clients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi(), clients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + 
.withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } private void prepareEnvironmentForRecovery(TestStorage testStorage) { @@ -307,9 +334,22 @@ private void prepareEnvironmentForRecovery(TestStorage testStorage) { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage).build()); - final KafkaClients clients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi(), clients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("##################################################"); 
LOGGER.info("Environment for recovery was successfully created"); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/PodSetST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/PodSetST.java index aa1ed5fedae..91991707389 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/PodSetST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/PodSetST.java @@ -12,11 +12,11 @@ import io.skodjob.annotations.TestDoc; import io.skodjob.kubetest4j.resources.KubeResourceManager; import io.strimzi.api.kafka.model.common.ProbeBuilder; +import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.Environment; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates; @@ -28,6 +28,8 @@ import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.StrimziPodSetUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -96,12 +98,21 @@ void testPodSetOnlyReconciliation() { .build() ); - final KafkaClients clients = ClientUtils.getContinuousPlainClientBuilder(testStorage).build(); + final KafkaProducerConsumer continuousKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getContinuousProducerName()) + .withConsumerName(testStorage.getContinuousConsumerName()) + 
.withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getContinuousTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getContinuousMessageCount()) + .withDelayMs(1000) + .build(); LOGGER.info("Producing and Consuming messages with clients: {}, {} in Namespace {}", testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName()); KubeResourceManager.get().createResourceWithWait( - clients.producerStrimzi(), - clients.consumerStrimzi() + continuousKafkaProducerConsumer.getProducer().getJob(), + continuousKafkaProducerConsumer.getConsumer().getJob() ); LOGGER.info("Changing {} to 'true', so only SPS will be reconciled", Environment.STRIMZI_POD_SET_RECONCILIATION_ONLY_ENV); @@ -140,7 +151,7 @@ void testPodSetOnlyReconciliation() { LOGGER.info("Wait till all StrimziPodSet {}/{} status match number of ready pods", testStorage.getNamespaceName(), testStorage.getBrokerComponentName()); StrimziPodSetUtils.waitForAllStrimziPodSetAndPodsReady(testStorage.getNamespaceName(), testStorage.getClusterName(), testStorage.getBrokerComponentName(), 3); - ClientUtils.waitForContinuousClientSuccess(testStorage); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getContinuousConsumerName(), testStorage.getContinuousProducerName(), testStorage.getMessageCount()); } @BeforeAll diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/RecoveryST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/RecoveryST.java index 1f3863ab48d..3bb9b71d0b8 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/RecoveryST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/RecoveryST.java @@ -21,7 +21,6 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; import 
io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.labels.LabelSelectors; import io.strimzi.systemtest.resources.crd.KafkaComponents; import io.strimzi.systemtest.resources.operator.ClusterOperatorConfigurationBuilder; @@ -37,6 +36,8 @@ import io.strimzi.systemtest.utils.kubeUtils.controllers.StrimziPodSetUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.ServiceUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeEach; @@ -192,9 +193,22 @@ void testRecoveryFromImpossibleMemoryRequest() { } private void verifyStabilityBySendingAndReceivingMessages(TestStorage testStorage) { - KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(sharedClusterName)); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(sharedClusterName)) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), 
testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @IsolatedTest diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/topic/TopicReplicasChangeST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/topic/TopicReplicasChangeST.java index 970f5b96fdd..1dd7eb269c0 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/topic/TopicReplicasChangeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/topic/TopicReplicasChangeST.java @@ -21,8 +21,6 @@ import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.ParallelTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; import io.strimzi.systemtest.labels.LabelSelectors; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -35,6 +33,8 @@ import io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils; import io.strimzi.systemtest.utils.specific.ScraperUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -399,20 +399,21 @@ private void verifyKafkaTopicAfterReplicationChange(final TestStorage testStorag * @param testStorage The test storage instance providing details for message exchange, including topic and client information. 
*/ private void sendAndRecvMessages(final TestStorage testStorage) { - KafkaClients kafkaClients = new KafkaClientsBuilder() - .withTopicName(testStorage.getTopicName()) - .withBootstrapAddress(KafkaResources.plainBootstrapAddress(sharedTestStorage.getClusterName())) - .withNamespaceName(sharedTestStorage.getNamespaceName()) - .withProducerName(testStorage.getProducerName()) - .withConsumerName(testStorage.getConsumerName()) - .withMessageCount(testStorage.getMessageCount()) - .build(); + KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(sharedTestStorage.getClusterName())) + .withNamespaceName(sharedTestStorage.getNamespaceName()) + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withMessageCount(testStorage.getMessageCount()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .build(); KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerStrimzi(), - kafkaClients.consumerStrimzi() + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @BeforeAll diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/topic/TopicST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/topic/TopicST.java index 5749602e917..debf2989370 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/topic/TopicST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/topic/TopicST.java @@ -25,7 +25,6 @@ import io.strimzi.systemtest.docs.TestDocsLabels; import io.strimzi.systemtest.enums.ConditionStatus; import io.strimzi.systemtest.enums.CustomResourceStatus; 
-import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.kafkaclients.internalClients.admin.AdminClient; import io.strimzi.systemtest.metrics.TopicOperatorMetricsComponent; import io.strimzi.systemtest.performance.gather.collectors.BaseMetricsCollector; @@ -35,7 +34,6 @@ import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates; import io.strimzi.systemtest.templates.crd.KafkaTemplates; import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; -import io.strimzi.systemtest.templates.specific.AdminClientTemplates; import io.strimzi.systemtest.templates.specific.ScraperTemplates; import io.strimzi.systemtest.utils.AdminClientUtils; import io.strimzi.systemtest.utils.ClientUtils; @@ -44,6 +42,10 @@ import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.systemtest.utils.specific.ScraperUtils; +import io.strimzi.testclients.clients.kafka.KafkaAdminClient; +import io.strimzi.testclients.clients.kafka.KafkaAdminClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.hamcrest.CoreMatchers; @@ -197,9 +199,22 @@ void testSendingMessagesToNonExistingTopic() { LOGGER.info("Topic with name {} is not created yet", testStorage.getTopicName()); LOGGER.info("Sending messages to non-existing Topic: {}, with auto.topic.creation configuration enabled", testStorage.getTopicName()); - final KafkaClients clients = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(sharedTestStorage.getClusterName())); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi(), clients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new 
KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(sharedTestStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Checking if topic {} is present in Kafka", testStorage.getTopicName()); assertTrue(AdminClientUtils.isTopicPresent(adminClient, testStorage.getTopicName())); @@ -246,27 +261,41 @@ void testDeleteTopicEnableFalse() { // create Kafka Topic CR and wait for its presence in Kafka cluster. 
KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage).build()); - KubeResourceManager.get().createResourceWithWait( - AdminClientTemplates.plainAdminClient( - testStorage.getNamespaceName(), - testStorage.getAdminName(), - KafkaResources.plainBootstrapAddress(testStorage.getClusterName()) - ).build() - ); + final KafkaAdminClient kafkaAdminClient = new KafkaAdminClientBuilder() + .withName(testStorage.getAdminName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withNamespaceName(testStorage.getNamespaceName()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaAdminClient.getDeployment()); final AdminClient localKafkaAdminClient = AdminClientUtils.getConfiguredAdminClient(testStorage.getNamespaceName(), testStorage.getAdminName()); AdminClientUtils.waitForTopicPresence(localKafkaAdminClient, testStorage.getTopicName()); - final KafkaClients clients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi()); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); 
LOGGER.info("Try to delete KafkaTopic: {}/{}", testStorage.getNamespaceName(), testStorage.getTopicName()); CrdClients.kafkaTopicClient().inNamespace(testStorage.getNamespaceName()).withName(testStorage.getTopicName()).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); KafkaTopicUtils.waitForTopicStatusMessage(testStorage.getNamespaceName(), testStorage.getTopicName(), "TopicDeletionDisabledException"); - KubeResourceManager.get().createResourceWithWait(clients.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); LOGGER.info("Enable automatic topic deletion"); Map kafkaPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getBrokerSelector()); @@ -591,13 +620,14 @@ void setup() { ); LOGGER.info("Deploying admin client across all test cases for namespace: {}", sharedTestStorage.getClusterName()); - KubeResourceManager.get().createResourceWithWait( - AdminClientTemplates.plainAdminClient( - sharedTestStorage.getNamespaceName(), - sharedTestStorage.getAdminName(), - KafkaResources.plainBootstrapAddress(sharedTestStorage.getClusterName()) - ).build() - ); + final KafkaAdminClient kafkaAdminClient = new KafkaAdminClientBuilder() + .withName(sharedTestStorage.getAdminName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(sharedTestStorage.getClusterName())) + .withNamespaceName(sharedTestStorage.getNamespaceName()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaAdminClient.getDeployment()); + adminClient = AdminClientUtils.getConfiguredAdminClient(sharedTestStorage.getNamespaceName(), sharedTestStorage.getAdminName()); scraperPodName = 
ScraperUtils.getScraperPod(Environment.TEST_SUITE_NAMESPACE).getMetadata().getName(); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/user/UserST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/user/UserST.java index ab6e31445f4..e1d1dd61b7c 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/user/UserST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/user/UserST.java @@ -31,8 +31,7 @@ import io.strimzi.systemtest.annotations.ParallelTest; import io.strimzi.systemtest.cli.KafkaCmdClient; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; @@ -47,6 +46,8 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.SecretUtils; import io.strimzi.test.ReadWriteUtils; import io.strimzi.test.TestUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.hamcrest.CoreMatchers; @@ -162,12 +163,23 @@ void testUpdateUser() { final long observedGeneration = CrdClients.kafkaUserClient().inNamespace(Environment.TEST_SUITE_NAMESPACE).withName(testStorage.getKafkaUsername()).get().getStatus().getObservedGeneration(); // Send and receive messages - KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(sharedTestStorage.getClusterName())) - .withUsername(testStorage.getKafkaUsername()) + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + 
.withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(sharedTestStorage.getClusterName())) + .withAuthentication(ClientsAuthentication.configureTls(sharedTestStorage.getClusterName(), testStorage.getKafkaUsername())) + .withMessageCount(testStorage.getMessageCount()) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(sharedTestStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(sharedTestStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); KafkaUserUtils.replace(Environment.TEST_SUITE_NAMESPACE, testStorage.getKafkaUsername(), ku -> { ku.getSpec().setAuthentication(new KafkaUserScramSha512ClientAuthentication()); @@ -186,12 +198,15 @@ void testUpdateUser() { assertThat(kafkaUserAsJson, hasJsonPath("$.metadata.namespace", equalTo(Environment.TEST_SUITE_NAMESPACE))); assertThat(kafkaUserAsJson, hasJsonPath("$.spec.authentication.type", equalTo("scram-sha-512"))); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withBootstrapAddress(KafkaResources.bootstrapServiceName(sharedTestStorage.getClusterName()) + ":9095") - .build(); + kafkaProducerConsumer.setBootstrapAddress(KafkaResources.bootstrapServiceName(sharedTestStorage.getClusterName()) + ":9095"); + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getKafkaUsername(), 
sharedTestStorage.getClusterName())); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerScramShaTlsStrimzi(sharedTestStorage.getClusterName()), kafkaClients.consumerScramShaTlsStrimzi(sharedTestStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @TestDoc( @@ -278,28 +293,33 @@ void testUserWithQuotas(KafkaUser user) { result.contains("controller_mutation_rate=" + mutRate); }); - final KafkaClients kafkaClients = ClientUtils.getInstantScramShaClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(sharedTestStorage.getClusterName())) - .withUsername(userName) + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(sharedTestStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) .build(); - if (user.getSpec().getAuthentication() instanceof KafkaUserScramSha512ClientAuthentication) { - kafkaClients.setBootstrapAddress(KafkaResources.bootstrapServiceName(sharedTestStorage.getClusterName()) + ":9095"); - - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerScramShaTlsStrimzi(sharedTestStorage.getClusterName()), - kafkaClients.consumerScramShaTlsStrimzi(sharedTestStorage.getClusterName())); + if (user.getSpec().getAuthentication() instanceof KafkaUserScramSha512ClientAuthentication) { + 
kafkaProducerConsumer.setBootstrapAddress(KafkaResources.bootstrapServiceName(sharedTestStorage.getClusterName()) + ":9095"); + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), userName, sharedTestStorage.getClusterName())); } else if (user.getSpec().getAuthentication() instanceof KafkaUserTlsClientAuthentication) { - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(sharedTestStorage.getClusterName()), - kafkaClients.consumerTlsStrimzi(sharedTestStorage.getClusterName())); - + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTls(sharedTestStorage.getClusterName(), userName)); } else if (user.getSpec().getAuthentication() instanceof KafkaUserTlsExternalClientAuthentication) { SecretUtils.createExternalTlsUserSecret(testStorage.getNamespaceName(), userName, sharedTestStorage.getClusterName()); - - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(sharedTestStorage.getClusterName()), - kafkaClients.consumerTlsStrimzi(sharedTestStorage.getClusterName())); + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTls(sharedTestStorage.getClusterName(), userName)); } - ClientUtils.waitForInstantClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // delete user CrdClients.kafkaUserClient().inNamespace(Environment.TEST_SUITE_NAMESPACE).withName(userName).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); @@ -385,22 +405,35 @@ void testCreatingUsersWithSecretPrefix() { assertNotNull(tlsSecret); assertNotNull(scramShaSecret); - KafkaClients clients = ClientUtils.getInstantTlsClientBuilder(testStorage) - 
.withUsername(secretPrefix + tlsUserName) + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), secretPrefix + tlsUserName)) + .withMessageCount(testStorage.getMessageCount()) .build(); LOGGER.info("Checking if TLS user is able to send messages"); - KubeResourceManager.get().createResourceWithWait(clients.producerTlsStrimzi(testStorage.getClusterName()), clients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); - - clients = ClientUtils.getInstantScramShaOverPlainClientBuilder(testStorage) - .withUsername(secretPrefix + scramShaUserName) - .build(); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Checking if SCRAM-SHA-512 user is able to send messages"); - KubeResourceManager.get().createResourceWithWait(clients.producerScramShaPlainStrimzi(), clients.consumerScramShaPlainStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + kafkaProducerConsumer.setBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())); + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configurePlainScramSha(testStorage.getNamespaceName(), secretPrefix + scramShaUserName)); + + KubeResourceManager.get().createResourceWithWait( + 
kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Checking owner reference - if the Secret will be deleted when we delete KafkaUser"); @@ -487,13 +520,23 @@ void testTlsExternalUser() { SecretUtils.createExternalTlsUserSecret(testStorage.getNamespaceName(), testStorage.getKafkaUsername(), testStorage.getClusterName()); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage) - .withUsername(testStorage.getKafkaUsername()) + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) .withConsumerGroup(consumerGroupName) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getKafkaUsername())) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); KafkaUserUtils.replace(testStorage.getNamespaceName(), testStorage.getKafkaUsername(), user -> { user.getSpec().setAuthorization(new KafkaUserAuthorizationSimpleBuilder() @@ 
-510,14 +553,12 @@ void testTlsExternalUser() { // Change the producer name in order to sure that we will not pick old Pod (race condition) String newProducerName = testStorage.getProducerName() + "-authz"; - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withProducerName(newProducerName) - .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName())); + kafkaProducerConsumer.setProducerName(newProducerName); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); PodUtils.waitUntilMessageIsInPodLogs(testStorage.getNamespaceName(), - PodUtils.getPodNameByPrefix(testStorage.getNamespaceName(), newProducerName), "authorization failed"); + PodUtils.getPodNameByPrefix(testStorage.getNamespaceName(), newProducerName), "Not authorized"); ClientUtils.waitForClientTimeout(testStorage.getNamespaceName(), newProducerName, testStorage.getMessageCount()); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/AlternativeReconcileTriggersST.java b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/AlternativeReconcileTriggersST.java index 6592757152e..48772df1ac6 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/AlternativeReconcileTriggersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/AlternativeReconcileTriggersST.java @@ -11,6 +11,7 @@ import io.strimzi.api.kafka.model.kafka.JbodStorage; import io.strimzi.api.kafka.model.kafka.JbodStorageBuilder; import io.strimzi.api.kafka.model.kafka.KRaftMetadataStorage; +import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.kafka.PersistentClaimStorage; import io.strimzi.api.kafka.model.kafka.PersistentClaimStorageBuilder; import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; @@ -21,8 +22,7 @@ import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.TestConstants; import 
io.strimzi.systemtest.annotations.ParallelNamespaceTest; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates; @@ -40,6 +40,8 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.PersistentVolumeUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.test.TestUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -97,17 +99,39 @@ void testManualTriggeringRollingUpdate() { producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + testStorage.getContinuousTopicName() + ".1"); producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true"); - final KafkaClients continuousClients = ClientUtils.getContinuousPlainClientBuilder(testStorage) + final KafkaProducerConsumer continuousKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getContinuousProducerName()) + .withConsumerName(testStorage.getContinuousConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getContinuousTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) .withMessageCount(continuousClientsMessageCount) .withAdditionalConfig(producerAdditionConfiguration) + .withDelayMs(1000) + .withAcks("all") .build(); - 
KubeResourceManager.get().createResourceWithWait(continuousClients.producerStrimzi(), continuousClients.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait( + continuousKafkaProducerConsumer.getProducer().getJob(), + continuousKafkaProducerConsumer.getConsumer().getJob() + ); KubeResourceManager.get().createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage).build()); - KafkaClients instantClients = ClientUtils.getInstantTlsClientBuilder(testStorage).build(); - KubeResourceManager.get().createResourceWithWait(instantClients.producerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); // rolling update for kafka // set annotation to trigger Kafka rolling update @@ -126,8 +150,8 @@ void testManualTriggeringRollingUpdate() { () -> StrimziPodSetUtils.getAnnotationsOfStrimziPodSet(testStorage.getNamespaceName(), testStorage.getBrokerComponentName()) == null || !StrimziPodSetUtils.getAnnotationsOfStrimziPodSet(testStorage.getNamespaceName(), testStorage.getBrokerComponentName()).containsKey(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE)); - 
KubeResourceManager.get().createResourceWithWait(instantClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); // rolling update for controller pods // set annotation to trigger controller rolling update @@ -146,27 +170,30 @@ void testManualTriggeringRollingUpdate() { () -> StrimziPodSetUtils.getAnnotationsOfStrimziPodSet(testStorage.getNamespaceName(), testStorage.getControllerComponentName()) == null || !StrimziPodSetUtils.getAnnotationsOfStrimziPodSet(testStorage.getNamespaceName(), testStorage.getControllerComponentName()).containsKey(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE)); - instantClients.generateNewConsumerGroup(); - KubeResourceManager.get().createResourceWithWait(instantClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); // Create new topic to ensure, that controller is working properly String newTopicName = KafkaTopicUtils.generateRandomNameOfTopic(); KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), newTopicName, testStorage.getClusterName(), 1, 1).build()); - instantClients = new KafkaClientsBuilder(instantClients) - .withTopicName(newTopicName) - .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) - .build(); + kafkaProducerConsumer.setTopicName(newTopicName); + 
kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); KubeResourceManager.get().createResourceWithWait( - instantClients.producerTlsStrimzi(testStorage.getClusterName()), - instantClients.consumerTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); - ClientUtils.waitForContinuousClientSuccess(testStorage, continuousClientsMessageCount); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); + + // ############################## + // Validate that continuous clients finished successfully + // ############################## + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getContinuousConsumerName(), testStorage.getContinuousProducerName(), continuousClientsMessageCount); + // ############################## } // This test is affected by https://github.com/strimzi/strimzi-kafka-operator/issues/3913 so it needs longer operation timeout set in CO @@ -369,17 +396,38 @@ void testAddingAndRemovingJbodVolumes() { producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + testStorage.getContinuousTopicName() + ".1"); producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true"); - KafkaClients kafkaBasicClientJob = ClientUtils.getContinuousPlainClientBuilder(testStorage) + final KafkaProducerConsumer continuousKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getContinuousProducerName()) + .withConsumerName(testStorage.getContinuousConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getContinuousTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + 
.withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) .withMessageCount(continuousClientsMessageCount) .withAdditionalConfig(producerAdditionConfiguration) + .withDelayMs(1000) + .withAcks("all") .build(); - KubeResourceManager.get().createResourceWithWait(kafkaBasicClientJob.producerStrimzi(), kafkaBasicClientJob.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait( + continuousKafkaProducerConsumer.getProducer().getJob(), + continuousKafkaProducerConsumer.getConsumer().getJob() + ); // ############################## - KafkaClients clients = ClientUtils.getInstantPlainClientBuilder(testStorage).build(); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi()); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); // Add Jbod volume to Kafka => triggers RU LOGGER.info("Add JBOD volume to the Kafka cluster {}", testStorage.getBrokerComponentName()); @@ -421,13 +469,13 @@ void testAddingAndRemovingJbodVolumes() { }); - KubeResourceManager.get().createResourceWithWait(clients.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + 
ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); // ############################## // Validate that continuous clients finished successfully // ############################## - ClientUtils.waitForContinuousClientSuccess(testStorage, continuousClientsMessageCount); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getContinuousConsumerName(), testStorage.getContinuousProducerName(), continuousClientsMessageCount); // ############################## } @@ -500,17 +548,38 @@ void testJbodMetadataLogRelocation() { producerAdditionConfiguration = producerAdditionConfiguration.concat("\ntransactional.id=" + testStorage.getContinuousTopicName() + ".1"); producerAdditionConfiguration = producerAdditionConfiguration.concat("\nenable.idempotence=true"); - KafkaClients kafkaBasicClientJob = ClientUtils.getContinuousPlainClientBuilder(testStorage) + final KafkaProducerConsumer continuousKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getContinuousProducerName()) + .withConsumerName(testStorage.getContinuousConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getContinuousTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) .withMessageCount(continuousClientsMessageCount) .withAdditionalConfig(producerAdditionConfiguration) + .withDelayMs(1000) + .withAcks("all") .build(); - KubeResourceManager.get().createResourceWithWait(kafkaBasicClientJob.producerStrimzi(), kafkaBasicClientJob.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait( + continuousKafkaProducerConsumer.getProducer().getJob(), + continuousKafkaProducerConsumer.getConsumer().getJob() + ); // ############################## - KafkaClients clients = 
ClientUtils.getInstantPlainClientBuilder(testStorage).build(); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi()); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); Map brokerPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getBrokerSelector()); @@ -530,13 +599,13 @@ void testJbodMetadataLogRelocation() { // verify that Kraft metadata log will be re-assigned to another volume (the minimum id, which is 0 now that's why data-0) KafkaUtils.verifyKafkaKraftMetadataLog(testStorage, 0, 2); - KubeResourceManager.get().createResourceWithWait(clients.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); // ############################## // Validate that continuous clients finished successfully // ############################## - ClientUtils.waitForContinuousClientSuccess(testStorage, continuousClientsMessageCount); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getContinuousConsumerName(), 
testStorage.getContinuousProducerName(), continuousClientsMessageCount); // ############################## } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java index c682c2f994d..fc5ea6e19a0 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/KafkaRollerST.java @@ -30,8 +30,6 @@ import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; import io.strimzi.systemtest.labels.LabelSelectors; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.crd.KafkaComponents; @@ -48,6 +46,8 @@ import io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -103,17 +103,24 @@ void testKafkaDoesNotRollsWhenTopicIsUnderReplicated() { KubeResourceManager.get().createResourceWithWait(kafkaTopicWith3Replicas); // setup clients - KafkaClients clients = ClientUtils.getInstantPlainClientBuilder(testStorage) + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) .withTopicName(topicNameWith3Replicas) + 
.withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) .build(); // producing and consuming data when there are 3 brokers ensures that 'consumer_offests' topic will have all of its replicas only across first 3 brokers LOGGER.info("Producing and Consuming messages with clients: {}, {} in Namespace {}", testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName()); KubeResourceManager.get().createResourceWithWait( - clients.producerStrimzi(), - clients.consumerStrimzi() + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Scale Kafka up from 3 to 4 brokers"); KafkaNodePoolUtils.replace(testStorage.getNamespaceName(), testStorage.getBrokerPoolName(), knp -> knp.getSpec().setReplicas(scaledUpBrokerReplicaCount)); @@ -130,16 +137,15 @@ void testKafkaDoesNotRollsWhenTopicIsUnderReplicated() { List events = StUtils.listEventsByResourceUid(testStorage.getNamespaceName(), uid); assertThat(events, hasAllOfReasons(Scheduled, Pulled, Created, Started)); - clients = new KafkaClientsBuilder(clients) - .withTopicName(topicNameWith4Replicas) - .build(); + kafkaProducerConsumer.setTopicName(topicNameWith4Replicas); LOGGER.info("Producing and Consuming messages with clients: {}, {} in Namespace {}", testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName()); KubeResourceManager.get().createResourceWithWait( - clients.producerStrimzi(), - clients.consumerStrimzi() + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - 
ClientUtils.waitForInstantClientSuccess(testStorage); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Scaling down to {}", initialBrokerReplicaCount); KafkaNodePoolUtils.replace(testStorage.getNamespaceName(), testStorage.getBrokerPoolName(), knp -> knp.getSpec().setReplicas(initialBrokerReplicaCount)); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/RollingUpdateST.java b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/RollingUpdateST.java index 3c6df1df072..6aaf800e629 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/RollingUpdateST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/RollingUpdateST.java @@ -20,6 +20,7 @@ import io.strimzi.api.kafka.model.common.ProbeBuilder; import io.strimzi.api.kafka.model.common.metrics.JmxPrometheusExporterMetrics; import io.strimzi.api.kafka.model.common.metrics.JmxPrometheusExporterMetricsBuilder; +import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.topic.KafkaTopic; import io.strimzi.operator.common.Annotations; import io.strimzi.systemtest.AbstractST; @@ -27,7 +28,7 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.metrics.KafkaMetricsComponent; import io.strimzi.systemtest.resources.crd.KafkaComponents; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -46,6 +47,8 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.PersistentVolumeClaimUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.test.TestUtils; +import 
io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -105,9 +108,22 @@ void testRecoveryDuringKRaftRollingUpdate() { KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName(), 2, 2).build() ); - final KafkaClients clients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(clients.producerStrimzi(), clients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // change controller knp to unreasonable CPU request causing trigger of Rolling update and recover by second modification modifyNodePoolToUnscheduledAndRecover(testStorage.getControllerPoolName(), testStorage.getControllerSelector(), testStorage); @@ -115,9 +131,9 @@ void testRecoveryDuringKRaftRollingUpdate() { // change broker knp to unreasonable CPU request causing trigger of Rolling update modifyNodePoolToUnscheduledAndRecover(testStorage.getBrokerPoolName(), 
testStorage.getBrokerSelector(), testStorage); - clients.generateNewConsumerGroup(); - KubeResourceManager.get().createResourceWithWait(clients.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } /** @@ -180,12 +196,23 @@ void testKafkaScaleUpScaleDown() { // communicate with topic before scaling up/down KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName(), 3, initialReplicas, initialReplicas).build()); - final KafkaClients clientsBeforeScale = ClientUtils.getInstantTlsClients(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + KubeResourceManager.get().createResourceWithWait( - clientsBeforeScale.producerTlsStrimzi(testStorage.getClusterName()), - clientsBeforeScale.consumerTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), 
testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // scale up final int scaleTo = initialReplicas + 2; @@ -201,9 +228,9 @@ void testKafkaScaleUpScaleDown() { // consuming data from original topic after scaling up LOGGER.info("Consume data produced before scaling up"); - clientsBeforeScale.generateNewConsumerGroup(); - KubeResourceManager.get().createResourceWithWait(clientsBeforeScale.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); // new topic has more replicas than there was available Kafka brokers before scaling up LOGGER.info("Create new KafkaTopic with replica count requiring existence of brokers added by scaling up"); @@ -215,14 +242,15 @@ void testKafkaScaleUpScaleDown() { KubeResourceManager.get().createResourceWithWait(scaledUpKafkaTopicResource); LOGGER.info("Produce and consume messages into KafkaTopic {}/{}", testStorage.getNamespaceName(), topicNameScaledUp); - final KafkaClients clientsAfterScaleUp = ClientUtils.getInstantTlsClientBuilder(testStorage) - .withTopicName(topicNameScaledUp) - .build(); + kafkaProducerConsumer.setTopicName(topicNameScaledUp); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + KubeResourceManager.get().createResourceWithWait( - clientsAfterScaleUp.producerTlsStrimzi(testStorage.getClusterName()), - clientsAfterScaleUp.consumerTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + + 
ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Verify number of PVCs is increased to 5 after scaling Kafka: {}/{} Up to 5 replicas", testStorage.getNamespaceName(), testStorage.getClusterName()); assertThat((int) PersistentVolumeClaimUtils.listPVCsByNameSubstring(testStorage.getNamespaceName(), testStorage.getClusterName()).stream().filter( @@ -240,9 +268,9 @@ void testKafkaScaleUpScaleDown() { // consuming from original topic (i.e. created before scaling) LOGGER.info("Consume data from topic {}/{} where data were produced before scaling up and down", testStorage.getNamespaceName(), testStorage.getTopicName()); - clientsBeforeScale.generateNewConsumerGroup(); - KubeResourceManager.get().createResourceWithWait(clientsBeforeScale.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); PersistentVolumeClaimUtils.waitForPersistentVolumeClaimDeletion(testStorage, initialReplicas); @@ -250,14 +278,16 @@ void testKafkaScaleUpScaleDown() { LOGGER.info("Creating new KafkaTopic: {}/{} and producing consuming data", testStorage.getNamespaceName(), topicNameScaledBackDown); KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), topicNameScaledBackDown, testStorage.getClusterName()).build()); - final KafkaClients clientsTopicAfterScaleDown = ClientUtils.getInstantTlsClientBuilder(testStorage) - .withTopicName(topicNameScaledBackDown) - .build(); + + kafkaProducerConsumer.setTopicName(topicNameScaledBackDown); + 
kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); KubeResourceManager.get().createResourceWithWait( - clientsTopicAfterScaleDown.producerTlsStrimzi(testStorage.getClusterName()), - clientsTopicAfterScaleDown.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } /** diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/NetworkPoliciesST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/NetworkPoliciesST.java index 7bb6b2973bf..08ca065915a 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/NetworkPoliciesST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/NetworkPoliciesST.java @@ -23,7 +23,7 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.SkipDefaultNetworkPolicyCreation; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.metrics.KafkaExporterMetricsComponent; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.operator.ClusterOperatorConfigurationBuilder; @@ -38,6 +38,8 @@ import io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils; import io.strimzi.systemtest.utils.specific.MetricsUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.Tag; @@ -176,39 +178,61 @@ 
void testNetworkPoliciesOnListenersWhenOperatorIsInSameNamespaceAsOperands() { ); LOGGER.info("Initialize producers and consumers with access to the Kafka using plain and tls listeners"); - final KafkaClients kafkaClientsWithAccessPlain = ClientUtils.getInstantScramShaOverPlainClientBuilder(testStorage) + final KafkaProducerConsumer kafkaProducerConsumerWithAccessPlain = new KafkaProducerConsumerBuilder() .withProducerName(producerNameAccessedPlain) .withConsumerName(consumerNameAccessedPlain) + .withNamespaceName(testStorage.getNamespaceName()) .withTopicName(topicNameAccessedPlain) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configurePlainScramSha(testStorage.getNamespaceName(), testStorage.getUsername())) .build(); - final KafkaClients kafkaClientsWithAccessTls = ClientUtils.getInstantScramShaOverTlsClientBuilder(testStorage) + + final KafkaProducerConsumer kafkaProducerConsumerWithAccessTls = new KafkaProducerConsumerBuilder() .withProducerName(producerNameAccessedTls) .withConsumerName(consumerNameAccessedTls) + .withNamespaceName(testStorage.getNamespaceName()) .withTopicName(topicNameAccessedTls) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getUsername(), testStorage.getClusterName())) .build(); LOGGER.info("Initialize producers and consumers without access (denied) to the Kafka using plain and tls listeners"); - final KafkaClients kafkaClientsWithoutAccessPlain = ClientUtils.getInstantScramShaOverPlainClientBuilder(testStorage) + final KafkaProducerConsumer 
kafkaProducerConsumerWithoutAccessPlain = new KafkaProducerConsumerBuilder() .withProducerName(producerNameDeniedPlain) .withConsumerName(consumerNameDeniedPlain) + .withNamespaceName(testStorage.getNamespaceName()) .withTopicName(topicNameDeniedPlain) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configurePlainScramSha(testStorage.getNamespaceName(), testStorage.getUsername())) .build(); - final KafkaClients kafkaClientsWithoutAccessTls = ClientUtils.getInstantScramShaOverTlsClientBuilder(testStorage) + + final KafkaProducerConsumer kafkaProducerConsumerWithoutAccessTls = new KafkaProducerConsumerBuilder() .withProducerName(producerNameDeniedTls) .withConsumerName(consumerNameDeniedTls) + .withNamespaceName(testStorage.getNamespaceName()) .withTopicName(topicNameDeniedTls) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getUsername(), testStorage.getClusterName())) .build(); LOGGER.info("Deploy all initialized clients"); KubeResourceManager.get().createResourceWithWait( - kafkaClientsWithAccessPlain.producerScramShaPlainStrimzi(), - kafkaClientsWithAccessPlain.consumerScramShaPlainStrimzi(), - kafkaClientsWithAccessTls.producerScramShaTlsStrimzi(testStorage.getClusterName()), - kafkaClientsWithAccessTls.consumerScramShaTlsStrimzi(testStorage.getClusterName()), - kafkaClientsWithoutAccessPlain.producerScramShaPlainStrimzi(), - kafkaClientsWithoutAccessPlain.consumerScramShaPlainStrimzi(), - kafkaClientsWithoutAccessTls.producerScramShaTlsStrimzi(testStorage.getClusterName()), - 
kafkaClientsWithoutAccessTls.consumerScramShaTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumerWithAccessPlain.getProducer().getJob(), + kafkaProducerConsumerWithAccessPlain.getConsumer().getJob(), + kafkaProducerConsumerWithAccessTls.getProducer().getJob(), + kafkaProducerConsumerWithAccessTls.getConsumer().getJob(), + kafkaProducerConsumerWithoutAccessPlain.getProducer().getJob(), + kafkaProducerConsumerWithoutAccessPlain.getConsumer().getJob(), + kafkaProducerConsumerWithoutAccessTls.getProducer().getJob(), + kafkaProducerConsumerWithoutAccessTls.getConsumer().getJob() ); LOGGER.info("Verifying that clients: {}, {}, {}, {} are all allowed to communicate", producerNameAccessedPlain, consumerNameAccessedPlain, producerNameAccessedTls, consumerNameAccessedTls); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/PodSecurityProfilesST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/PodSecurityProfilesST.java index eed9ca6f31b..75929ca6385 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/PodSecurityProfilesST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/PodSecurityProfilesST.java @@ -8,6 +8,8 @@ import io.fabric8.kubernetes.api.model.EnvVarBuilder; import io.fabric8.kubernetes.api.model.Pod; import io.fabric8.kubernetes.api.model.SecurityContext; +import io.fabric8.kubernetes.api.model.batch.v1.Job; +import io.fabric8.kubernetes.api.model.batch.v1.JobBuilder; import io.skodjob.kubetest4j.resources.KubeResourceManager; import io.skodjob.kubetest4j.utils.KubeUtils; import io.strimzi.api.kafka.model.kafka.KafkaResources; @@ -16,9 +18,6 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; import io.strimzi.systemtest.annotations.RequiredMinKubeOrOcpBasedKubeVersion; -import io.strimzi.systemtest.enums.PodSecurityProfile; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import 
io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; import io.strimzi.systemtest.resources.crd.KafkaComponents; import io.strimzi.systemtest.resources.operator.ClusterOperatorConfigurationBuilder; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -33,6 +32,8 @@ import io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaConnectUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.hamcrest.CoreMatchers; @@ -156,14 +157,22 @@ void testOperandsWithRestrictedSecurityProfile() { // Messages produced to Main Kafka Cluster (source) will be sinked to file, and mirrored into targeted Kafkas to later verify Operands work correctly. LOGGER.info("Transmit messages in Cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getClusterName()); - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClientBuilder(testStorage) - .withPodSecurityPolicy(PodSecurityProfile.RESTRICTED) - .build(); + final KafkaProducerConsumerBuilder kafkaProducerConsumerBuilder = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()); + + final KafkaProducerConsumer kafkaProducerConsumer = kafkaProducerConsumerBuilder.build(); + KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerStrimzi(), - kafkaClients.consumerStrimzi() + 
applyRestrictedSecurityProfileToClientJob(kafkaProducerConsumer.getProducer().getJob()), + applyRestrictedSecurityProfileToClientJob(kafkaProducerConsumer.getConsumer().getJob()) ); - ClientUtils.waitForInstantClientSuccess(testStorage); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // verifies that Pods and Containers have proper generated SC final List podsWithProperlyGeneratedSecurityContexts = new ArrayList<>(PodUtils.getKafkaClusterPods(testStorage)); @@ -177,17 +186,16 @@ void testOperandsWithRestrictedSecurityProfile() { KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(testStorage.getNamespaceName(), kafkaConnectPodName, TestConstants.DEFAULT_SINK_FILE_PATH, testStorage.getMessageCount()); // verify MM2 - final KafkaClients mm2Client = ClientUtils.getInstantPlainClientBuilder(testStorage, KafkaResources.plainBootstrapAddress(mm2TargetClusterName)) + final KafkaProducerConsumer mm2Client = kafkaProducerConsumerBuilder + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(mm2TargetClusterName)) .withTopicName(mm2SourceMirroredTopicName) - .withPodSecurityPolicy(PodSecurityProfile.RESTRICTED) .build(); - KubeResourceManager.get().createResourceWithWait(mm2Client.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + + KubeResourceManager.get().createResourceWithWait(applyRestrictedSecurityProfileToClientJob(mm2Client.getConsumer().getJob())); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); // verify that client incorrectly configured Pod Security Profile wont successfully communicate. 
- final KafkaClients incorrectKafkaClients = new KafkaClientsBuilder(kafkaClients) - .withPodSecurityPolicy(PodSecurityProfile.DEFAULT) - .build(); + final Job invalidProducerJob = kafkaProducerConsumer.getProducer().getJob(); // excepted failure // job_controller.go:1437 pods "..." is forbidden: violates PodSecurity "restricted:latest": allowPrivilegeEscalation != false @@ -195,8 +203,37 @@ void testOperandsWithRestrictedSecurityProfile() { // unrestricted capabilities (container "..." must set securityContext.capabilities.drop=["ALL"]), // runAsNonRoot != true (pod or container "..." must set securityContext.runAsNonRoot=true), // seccompProfile (pod or container "..." must set securityContext.seccompProfile.type to "RuntimeDefault" or "Localhost") - KubeResourceManager.get().createResourceWithoutWait(incorrectKafkaClients.producerStrimzi()); - ClientUtils.waitForInstantProducerClientTimeout(testStorage); + KubeResourceManager.get().createResourceWithoutWait(invalidProducerJob); + ClientUtils.waitForClientTimeout(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); + } + + private Job applyRestrictedSecurityProfileToClientJob(Job clientJob) { + return new JobBuilder(clientJob) + .editSpec() + .editTemplate() + .editSpec() + .withNewSecurityContext() + .withRunAsNonRoot(true) + .withNewSeccompProfile() + .withType("RuntimeDefault") + .endSeccompProfile() + .endSecurityContext() + .editFirstContainer() + .withNewSecurityContext() + .withAllowPrivilegeEscalation(false) + .withNewCapabilities() + .withDrop("ALL") + .endCapabilities() + .withRunAsNonRoot(true) + .withNewSeccompProfile() + .withType("RuntimeDefault") + .endSeccompProfile() + .endSecurityContext() + .endContainer() + .endSpec() + .endTemplate() + .endSpec() + .build(); } @BeforeAll diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/SecurityST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/SecurityST.java index 
e267c0e0218..a84769c588b 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/SecurityST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/SecurityST.java @@ -30,9 +30,8 @@ import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.kafkaclients.externalClients.ExternalKafkaClient; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; @@ -52,6 +51,8 @@ import io.strimzi.systemtest.utils.kubeUtils.objects.SecretUtils; import io.strimzi.test.ReadWriteUtils; import io.strimzi.test.TestUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.kafka.common.config.SslConfigs; import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.security.auth.SecurityProtocol; @@ -164,9 +165,23 @@ void autoRenewSomeCaCertsTriggeredByAnno( KafkaTopicTemplates.topic(testStorage).build() ); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + 
.withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // Get all pods, and their resource versions Map controllerPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getControllerSelector()); @@ -217,22 +232,21 @@ void autoRenewSomeCaCertsTriggeredByAnno( value, is(not(initialCaCerts.get(secretName)))); } - kafkaClients.generateNewConsumerGroup(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); // Check a new client (signed by new client key) can consume String bobUserName = "bob-" + testStorage.getUsername(); KubeResourceManager.get().createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), bobUserName, testStorage.getClusterName()).build()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) - .withUsername(bobUserName) - .build(); + 
kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), bobUserName)); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); if (!kafkaShouldRoll) { assertThat("Kafka Pods should not roll, but did.", PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getBrokerSelector()), is(brokerPods)); @@ -311,9 +325,23 @@ void autoReplaceSomeKeysTriggeredByAnno(int expectedRolls, KafkaTopicTemplates.topic(testStorage).build() ); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + 
kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // Get all pods, and their resource versions Map controllerPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getControllerSelector()); @@ -387,9 +415,9 @@ void autoReplaceSomeKeysTriggeredByAnno(int expectedRolls, value, is(not(initialCaKeys.get(secretName)))); } - kafkaClients.generateNewConsumerGroup(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); // Finally check a new client (signed by new client key) can consume @@ -397,13 +425,11 @@ void autoReplaceSomeKeysTriggeredByAnno(int expectedRolls, KubeResourceManager.get().createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage.getNamespaceName(), bobUserName, testStorage.getClusterName()).build()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) - .withUsername(bobUserName) - .build(); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), bobUserName)); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + 
ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); if (!kafkaShouldRoll) { assertThat("Controller Pods should not roll, but did.", PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getControllerSelector()), is(controllerPods)); @@ -493,9 +519,23 @@ void testAutoRenewCaCertsTriggerByExpiredCertificate() { KafkaTopicTemplates.topic(testStorage).build() ); - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // Wait until the certificates have been replaced SecretUtils.waitForCertToChange(testStorage.getNamespaceName(), clusterCaCert, clusterCaCertificateSecretName(testStorage.getClusterName())); @@ -503,8 +543,12 @@ void testAutoRenewCaCertsTriggerByExpiredCertificate() { // Wait until the pods 
are all up and ready KafkaUtils.waitForClusterStability(testStorage.getNamespaceName(), testStorage.getClusterName()); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -601,9 +645,23 @@ void testCertRenewalInMaintenanceTimeWindow() { assertThat("KafkaUser certificate has not been renewed within maintenanceTimeWindows", kafkaUserSecret, not(sameInstance(kafkaUserSecretRolled))); - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + 
); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -657,9 +715,23 @@ void testCertRegeneratedAfterInternalCAisDeleted() { assertThat("Certificates has different cert UIDs", !secrets.get(i).getData().get("ca.crt").equals(regeneratedSecrets.get(i).getData().get("ca.crt"))); } - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -923,9 +995,23 @@ void testCaRenewalBreakInMiddle() { .build() ); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), 
kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); Map controllerPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getControllerSelector()); Map brokerPods = PodUtils.podSnapshot(testStorage.getNamespaceName(), testStorage.getBrokerSelector()); @@ -959,9 +1045,9 @@ void testCaRenewalBreakInMiddle() { } ); - kafkaClients.generateNewConsumerGroup(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); final ResourceRequirements correctRequirements = new ResourceRequirementsBuilder() .addToRequests("cpu", new Quantity("200m")) 
@@ -976,22 +1062,19 @@ void testCaRenewalBreakInMiddle() { RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), testStorage.getBrokerSelector(), 3, brokerPods); DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), testStorage.getEoDeploymentName(), 1, eoPods); - kafkaClients.generateNewConsumerGroup(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); // Try to send and receive messages with new certificates String topicName = KafkaTopicUtils.generateRandomNameOfTopic(); KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), topicName, testStorage.getClusterName()).build()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) - .withTopicName(topicName) - .build(); - - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); + kafkaProducerConsumer.setTopicName(topicName); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob(), kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -1416,9 +1499,23 @@ void testBrokerCertificatesIncludeFullCaChain() { 
KafkaTopicTemplates.topic(testStorage).build() ); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @BeforeAll diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomAuthorizerST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomAuthorizerST.java index 1276bffd4f4..3fd8478ca31 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomAuthorizerST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomAuthorizerST.java @@ -22,8 +22,7 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.ParallelTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import 
io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates; @@ -31,6 +30,8 @@ import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; import io.strimzi.systemtest.templates.crd.KafkaUserTemplates; import io.strimzi.systemtest.utils.ClientUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -112,28 +113,32 @@ void testAclRuleReadAndWrite() { LOGGER.info("Checking KafkaUser {} that is able to send messages to Topic: {}", kafkaUserWrite, testStorage.getTopicName()); - KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(sharedTestStorage.getClusterName())) - .withUsername(kafkaUserWrite) + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) .withConsumerGroup(consumerGroupName) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(sharedTestStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(sharedTestStorage.getClusterName(), kafkaUserWrite)) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(sharedTestStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + 
KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(sharedTestStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientTimeout(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientTimeout(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); - kafkaClients = new KafkaClientsBuilder(kafkaClients) - .withUsername(kafkaUserRead) - .build(); + kafkaProducerConsumer.setAuthentication(ClientsAuthentication.configureTls(sharedTestStorage.getClusterName(), kafkaUserRead)); - KubeResourceManager.get().createResourceWithWait(kafkaClients.consumerTlsStrimzi(sharedTestStorage.getClusterName())); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); LOGGER.info("Checking KafkaUser: {}/{} that is not able to send messages to Topic: {}", testStorage.getNamespaceName(), kafkaUserRead, testStorage.getTopicName()); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(sharedTestStorage.getClusterName())); - ClientUtils.waitForInstantProducerClientTimeout(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getProducer().getJob()); + ClientUtils.waitForClientTimeout(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelTest @@ -153,13 +158,24 @@ void testAclWithSuperUser() { 
KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), sharedTestStorage.getClusterName()).build()); KubeResourceManager.get().createResourceWithWait(KafkaUserTemplates.tlsUser(Environment.TEST_SUITE_NAMESPACE, ADMIN, sharedTestStorage.getClusterName()).build()); - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(sharedTestStorage.getClusterName())) - .withUsername(ADMIN) + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(sharedTestStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(sharedTestStorage.getClusterName(), ADMIN)) .build(); LOGGER.info("Checking Kafka Super User: {}/{} is able to produce/consume despite having no explicit rights in KafkaUser", Environment.TEST_SUITE_NAMESPACE, ADMIN); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(sharedTestStorage.getClusterName()), kafkaClients.consumerTlsStrimzi(sharedTestStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @BeforeAll diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaChainST.java 
b/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaChainST.java index d7b7a83e0c4..86f1d8ca777 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaChainST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaChainST.java @@ -18,7 +18,7 @@ import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.security.SystemTestCertBundle; import io.strimzi.systemtest.security.SystemTestCertGenerator; @@ -32,6 +32,8 @@ import io.strimzi.systemtest.utils.kafkaUtils.KafkaConnectUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.SecretUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -120,14 +122,25 @@ void testMultistageCustomCaUserCertificateAuthentication() { SecretUtils.createCustomCertSecret(testStorage.getNamespaceName(), testStorage.getClusterName(), leafSignedClientName, leafSignedClientFiles, "user"); - final KafkaClients leafSignedClients = ClientUtils.getInstantTlsClientBuilder(testStorage) - .withUsername(leafSignedClientName) + final KafkaProducerConsumerBuilder kafkaProducerConsumerBaseline = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + 
.withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()); + + final KafkaProducerConsumer leafSignedKafkaProducerConsumer = kafkaProducerConsumerBaseline + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), leafSignedClientName)) + .build(); + KubeResourceManager.get().createResourceWithWait( - leafSignedClients.producerTlsStrimzi(testStorage.getClusterName()), - leafSignedClients.consumerTlsStrimzi(testStorage.getClusterName()) + leafSignedKafkaProducerConsumer.getProducer().getJob(), + leafSignedKafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // ii.) Create a client certificate signed by the Clients CA Root and verify it is rejected LOGGER.info("Testing that client with Root-CA-signed certificate is rejected"); @@ -139,14 +152,16 @@ void testMultistageCustomCaUserCertificateAuthentication() { SecretUtils.createCustomCertSecret(testStorage.getNamespaceName(), testStorage.getClusterName(), rootSignedClientName, rootSignedClientFiles, "user"); - final KafkaClients rootSignedClients = ClientUtils.getInstantTlsClientBuilder(testStorage) - .withUsername(rootSignedClientName) + final KafkaProducerConsumer rootSignedKafkaProducerConsumer = kafkaProducerConsumerBaseline + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), rootSignedClientName)) + .build(); + KubeResourceManager.get().createResourceWithWait( - rootSignedClients.producerTlsStrimzi(testStorage.getClusterName()), - 
rootSignedKafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientsTimeout(testStorage); + + ClientUtils.waitForClientsTimeout(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // iii.) Create a client certificate signed by the Clients CA Intermediate and verify it is rejected LOGGER.info("Testing that client with Intermediate-CA-signed certificate is rejected"); @@ -158,14 +173,16 @@ void testMultistageCustomCaUserCertificateAuthentication() { SecretUtils.createCustomCertSecret(testStorage.getNamespaceName(), testStorage.getClusterName(), intermediateSignedClientName, intermediateSignedClientFiles, "user"); - final KafkaClients intermediateSignedClients = ClientUtils.getInstantTlsClientBuilder(testStorage) - .withUsername(intermediateSignedClientName) + final KafkaProducerConsumer intermediateSignedKafkaProducerConsumer = kafkaProducerConsumerBaseline + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), intermediateSignedClientName)) .build(); + KubeResourceManager.get().createResourceWithWait( - intermediateSignedClients.producerTlsStrimzi(testStorage.getClusterName()), - intermediateSignedClients.consumerTlsStrimzi(testStorage.getClusterName()) + intermediateSignedKafkaProducerConsumer.getProducer().getJob(), + intermediateSignedKafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientsTimeout(testStorage); + + ClientUtils.waitForClientsTimeout(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -272,26 +289,44 @@ void testMultistageCustomCaTrustChainEstablishment() { final String[] trustSecretNames = {trustFullChainName, trustRootIntermediateName, trustRootOnlyName, trustIntermediateOnlyName, trustLeafOnlyName}; for (String trustSecretName : trustSecretNames) { LOGGER.info("Testing trust establishment 
with trust secret: {}", trustSecretName); - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClientBuilder(testStorage) - .withCaCertSecretName(trustSecretName) + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(trustSecretName, testStorage.getUsername())) .build(); + KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), - kafkaClients.consumerTlsStrimzi(testStorage.getClusterName()) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } // Verify foreign CA trust fails LOGGER.info("Testing that foreign CA trust secret fails to establish trust"); - final KafkaClients foreignClients = ClientUtils.getInstantTlsClientBuilder(testStorage) - .withCaCertSecretName(trustForeignName) + final KafkaProducerConsumer foreignKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + 
.withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(trustForeignName, testStorage.getUsername())) .build(); + KubeResourceManager.get().createResourceWithWait( - foreignClients.producerTlsStrimzi(testStorage.getClusterName()), - foreignClients.consumerTlsStrimzi(testStorage.getClusterName()) + foreignKafkaProducerConsumer.getProducer().getJob(), + foreignKafkaProducerConsumer.getConsumer().getJob() ); - ClientUtils.waitForInstantClientsTimeout(testStorage); + + ClientUtils.waitForClientsTimeout(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest @@ -368,15 +403,24 @@ void testCustomCaTrustChainOnInternalPort() { SecretUtils.createCustomCertSecret(testStorage.getNamespaceName(), testStorage.getClusterName(), leafSignedClientName, leafSignedClientFiles, "user"); - final KafkaClients leafSignedClients = ClientUtils.getInstantTlsClientBuilder(testStorage, internalBootstrapAddress) - .withUsername(leafSignedClientName) - .withCaCertSecretName(trustFullChainName) + final KafkaProducerConsumerBuilder kafkaProducerConsumerBaseline = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(internalBootstrapAddress) + .withMessageCount(testStorage.getMessageCount()); + + final KafkaProducerConsumer leafSignedClients = kafkaProducerConsumerBaseline + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(trustFullChainName, leafSignedClientName)) .build(); + KubeResourceManager.get().createResourceWithWait( - leafSignedClients.producerTlsStrimzi(testStorage.getClusterName()), - 
leafSignedClients.consumerTlsStrimzi(testStorage.getClusterName()) + leafSignedClients.getProducer().getJob(), + leafSignedClients.getConsumer().getJob() ); - ClientUtils.waitForInstantClientSuccess(testStorage); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // ii.) Create a client certificate signed by the Root CA and verify it is rejected on port 9091 LOGGER.info("Testing that client with Root-CA-signed certificate is rejected on port 9091"); @@ -388,15 +432,17 @@ void testCustomCaTrustChainOnInternalPort() { SecretUtils.createCustomCertSecret(testStorage.getNamespaceName(), testStorage.getClusterName(), rootSignedClientName, rootSignedClientFiles, "user"); - final KafkaClients rootSignedClients = ClientUtils.getInstantTlsClientBuilder(testStorage, internalBootstrapAddress) - .withUsername(rootSignedClientName) - .withCaCertSecretName(trustFullChainName) + final KafkaProducerConsumer rootSignedClients = kafkaProducerConsumerBaseline + .withBootstrapAddress(internalBootstrapAddress) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(trustFullChainName, rootSignedClientName)) .build(); + KubeResourceManager.get().createResourceWithWait( - rootSignedClients.producerTlsStrimzi(testStorage.getClusterName()), - rootSignedClients.consumerTlsStrimzi(testStorage.getClusterName()) + rootSignedClients.getProducer().getJob(), + rootSignedClients.getConsumer().getJob() ); - ClientUtils.waitForInstantClientsTimeout(testStorage); + + ClientUtils.waitForClientsTimeout(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); // iii.) 
Create a client certificate signed by the Intermediate CA and verify it is rejected on port 9091 LOGGER.info("Testing that client with Intermediate-CA-signed certificate is rejected on port 9091"); @@ -408,15 +454,17 @@ void testCustomCaTrustChainOnInternalPort() { SecretUtils.createCustomCertSecret(testStorage.getNamespaceName(), testStorage.getClusterName(), intermediateSignedClientName, intermediateSignedClientFiles, "user"); - final KafkaClients intermediateSignedClients = ClientUtils.getInstantTlsClientBuilder(testStorage, internalBootstrapAddress) - .withUsername(intermediateSignedClientName) - .withCaCertSecretName(trustFullChainName) + final KafkaProducerConsumer intermediateSignedClients = kafkaProducerConsumerBaseline + .withBootstrapAddress(internalBootstrapAddress) + .withAuthentication(ClientsAuthentication.configureTlsCustomCerts(trustFullChainName, intermediateSignedClientName)) .build(); + KubeResourceManager.get().createResourceWithWait( - intermediateSignedClients.producerTlsStrimzi(testStorage.getClusterName()), - intermediateSignedClients.consumerTlsStrimzi(testStorage.getClusterName()) + intermediateSignedClients.getProducer().getJob(), + intermediateSignedClients.getConsumer().getJob() ); - ClientUtils.waitForInstantClientsTimeout(testStorage); + + ClientUtils.waitForClientsTimeout(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaST.java index 0f598c9582f..f971eaadafd 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/custom/CustomCaST.java @@ -21,7 +21,7 @@ import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; import 
io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.resources.crd.KafkaComponents; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.security.SystemTestCertBundle; @@ -38,6 +38,10 @@ import io.strimzi.systemtest.utils.kubeUtils.controllers.StrimziPodSetUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.SecretUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerClient; +import io.strimzi.testclients.clients.kafka.KafkaProducerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -132,9 +136,18 @@ void testReplacingCustomClusterKeyPairToInvokeRenewalProcess() { LOGGER.info("All rounds of rolling update have been finished"); // Try to produce messages - final KafkaClients kafkaBasicClientJob = ClientUtils.getInstantTlsClients(testStorage); KubeResourceManager.get().createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage).build()); - KubeResourceManager.get().createResourceWithWait(kafkaBasicClientJob.producerTlsStrimzi(testStorage.getClusterName())); + + final KafkaProducerClient kafkaProducerClient = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + + 
KubeResourceManager.get().createResourceWithWait(kafkaProducerClient.getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); // Get new certificate secret and assert old value is still present @@ -204,9 +217,17 @@ void testReplacingCustomClientsKeyPairToInvokeRenewalProcess() { RollingUpdateUtils.waitForNoRollingUpdate(testStorage.getNamespaceName(), testStorage.getEoSelector(), eoPod); // Try to produce messages - final KafkaClients kafkaBasicClientJob = ClientUtils.getInstantTlsClients(testStorage); KubeResourceManager.get().createResourceWithWait(KafkaUserTemplates.tlsUser(testStorage).build()); - KubeResourceManager.get().createResourceWithWait(kafkaBasicClientJob.producerTlsStrimzi(testStorage.getClusterName())); + final KafkaProducerClient kafkaProducerClient = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaProducerClient.getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @@ -279,9 +300,23 @@ void testCustomClusterCaAndClientsCaCertificates() { SystemTestCertGenerator.containsAllDN(userCert.getIssuerX500Principal().getName(), clientsCa.getSubjectDn())); LOGGER.info("Send and receive messages over TLS"); - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerTlsStrimzi(testStorage.getClusterName()), 
kafkaClients.consumerTlsStrimzi(testStorage.getClusterName())); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), testStorage.getUsername())) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthAuthorizationST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthAuthorizationST.java index edfe62709de..2399eb92aa1 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthAuthorizationST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthAuthorizationST.java @@ -15,8 +15,7 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.FIPSNotSupported; import io.strimzi.systemtest.annotations.ParallelTest; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaOauthClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaOauthClientsBuilder; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.keycloak.KeycloakInstance; import 
io.strimzi.systemtest.resources.crd.KafkaComponents; import io.strimzi.systemtest.storage.TestStorage; @@ -26,6 +25,12 @@ import io.strimzi.systemtest.templates.crd.KafkaUserTemplates; import io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.JobUtils; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClient; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerClient; +import io.strimzi.testclients.clients.kafka.KafkaProducerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -79,7 +84,7 @@ void smokeTestForClients() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, topicName, oauthClusterName).build()); - KafkaOauthClients teamAOauthClientJob = new KafkaOauthClientsBuilder() + final KafkaProducerConsumer teamAOauthClients = new KafkaProducerConsumerBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) .withProducerName(teamAProducerName) .withConsumerName(teamAConsumerName) @@ -87,15 +92,15 @@ void smokeTestForClients() { .withTopicName(topicName) .withMessageCount(testStorage.getMessageCount()) .withConsumerGroup(consumerGroup) - .withOauthClientId(TEAM_A_CLIENT) - .withOauthClientSecret(TEAM_A_CLIENT_SECRET) - .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) + .withAuthentication(ClientsAuthentication.configureTlsOAuth(oauthClusterName, TEAM_A_CLIENT, TEAM_A_CLIENT_SECRET, keycloakInstance.getOauthTokenEndpointUri())) + .withAdditionalConfig("oauth.token.endpoint.uri=" + keycloakInstance.getOauthTokenEndpointUri()) .build(); - 
KubeResourceManager.get().createResourceWithWait(teamAOauthClientJob.producerStrimziOauthTls(oauthClusterName)); - ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, teamAProducerName, testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(teamAOauthClientJob.consumerStrimziOauthTls(oauthClusterName)); - ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, teamAConsumerName, testStorage.getMessageCount()); + KubeResourceManager.get().createResourceWithWait( + teamAOauthClients.getProducer().getJob(), + teamAOauthClients.getConsumer().getJob() + ); + ClientUtils.waitForClientsSuccess(Environment.TEST_SUITE_NAMESPACE, teamAConsumerName, teamAProducerName, testStorage.getMessageCount()); } /** @@ -107,22 +112,16 @@ void smokeTestForClients() { void testTeamAWriteToTopic() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); String teamAProducerName = TEAM_A_PRODUCER_NAME + "-" + testStorage.getClusterName(); - String teamAConsumerName = TEAM_A_CONSUMER_NAME + "-" + testStorage.getClusterName(); - String consumerGroup = "a-consumer_group-" + testStorage.getClusterName(); KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), oauthClusterName).build()); - KafkaOauthClients teamAOauthClientJob = new KafkaOauthClientsBuilder() + final KafkaProducerClient teamAOauthProducer = new KafkaProducerClientBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) - .withProducerName(teamAProducerName) - .withConsumerName(teamAConsumerName) + .withName(teamAProducerName) .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(oauthClusterName)) .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) - .withConsumerGroup(consumerGroup) - .withOauthClientId(TEAM_A_CLIENT) - .withOauthClientSecret(TEAM_A_CLIENT_SECRET) - 
.withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) + .withAuthentication(ClientsAuthentication.configureTlsOAuth(oauthClusterName, TEAM_A_CLIENT, TEAM_A_CLIENT_SECRET, keycloakInstance.getOauthTokenEndpointUri())) // by default it's set to 1000, which makes the job longer to fail .withAdditionalConfig("retry.backoff.max.ms=100\n") .build(); @@ -130,37 +129,31 @@ void testTeamAWriteToTopic() { LOGGER.info("Sending {} messages to Broker with Topic name {}", testStorage.getMessageCount(), testStorage.getTopicName()); LOGGER.info("Producer will not produce messages because authorization Topic will failed. Team A can write only to Topic starting with 'x-'"); - KubeResourceManager.get().createResourceWithWait(teamAOauthClientJob.producerStrimziOauthTls(oauthClusterName)); + KubeResourceManager.get().createResourceWithWait(teamAOauthProducer.getJob()); JobUtils.waitForJobFailure(Environment.TEST_SUITE_NAMESPACE, teamAProducerName, 30_000); JobUtils.deleteJobWithWait(Environment.TEST_SUITE_NAMESPACE, teamAProducerName); String topicXName = TOPIC_X + "-" + testStorage.getClusterName(); LOGGER.info("Sending {} messages to Broker with Topic name {}", testStorage.getMessageCount(), topicXName); - teamAOauthClientJob = new KafkaOauthClientsBuilder(teamAOauthClientJob) - .withConsumerGroup(consumerGroup) - .withTopicName(topicXName) - .build(); + teamAOauthProducer.setTopicName(topicXName); - KubeResourceManager.get().createResourceWithWait(teamAOauthClientJob.producerStrimziOauthTls(oauthClusterName)); + KubeResourceManager.get().createResourceWithWait(teamAOauthProducer.getJob()); JobUtils.waitForJobFailure(Environment.TEST_SUITE_NAMESPACE, teamAProducerName, 30_000); JobUtils.deleteJobWithWait(Environment.TEST_SUITE_NAMESPACE, teamAProducerName); // Team A can not create topic starting with 'x-' only write to existing on KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, topicXName, 
oauthClusterName).build()); - KubeResourceManager.get().createResourceWithWait(teamAOauthClientJob.producerStrimziOauthTls(oauthClusterName)); + KubeResourceManager.get().createResourceWithWait(teamAOauthProducer.getJob()); ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, teamAProducerName, testStorage.getMessageCount()); String topicAName = TOPIC_A + "-" + testStorage.getClusterName(); LOGGER.info("Sending {} messages to Broker with Topic name {}", testStorage.getMessageCount(), topicAName); - teamAOauthClientJob = new KafkaOauthClientsBuilder(teamAOauthClientJob) - .withConsumerGroup(consumerGroup) - .withTopicName(topicAName) - .build(); + teamAOauthProducer.setTopicName(topicAName); - KubeResourceManager.get().createResourceWithWait(teamAOauthClientJob.producerStrimziOauthTls(oauthClusterName)); + KubeResourceManager.get().createResourceWithWait(teamAOauthProducer.getJob()); ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, teamAProducerName, testStorage.getMessageCount()); } @@ -178,7 +171,7 @@ void testTeamAReadFromTopic() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, topicAName, oauthClusterName).build()); - KafkaOauthClients teamAOauthClientJob = new KafkaOauthClientsBuilder() + final KafkaProducerConsumer teamAOauthClients = new KafkaProducerConsumerBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) .withProducerName(teamAProducerName) .withConsumerName(teamAConsumerName) @@ -186,35 +179,28 @@ void testTeamAReadFromTopic() { .withTopicName(topicAName) .withMessageCount(testStorage.getMessageCount()) .withConsumerGroup(consumerGroup) - .withOauthClientId(TEAM_A_CLIENT) - .withOauthClientSecret(TEAM_A_CLIENT_SECRET) - .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) + .withAuthentication(ClientsAuthentication.configureTlsOAuth(oauthClusterName, TEAM_A_CLIENT, TEAM_A_CLIENT_SECRET, keycloakInstance.getOauthTokenEndpointUri())) 
.build(); LOGGER.info("Sending {} messages to Broker with Topic name {}", testStorage.getMessageCount(), topicAName); - KubeResourceManager.get().createResourceWithWait(teamAOauthClientJob.producerStrimziOauthTls(oauthClusterName)); + KubeResourceManager.get().createResourceWithWait(teamAOauthClients.getProducer().getJob()); ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, teamAProducerName, testStorage.getMessageCount()); // team A client shouldn't be able to consume messages with wrong consumer group - teamAOauthClientJob = new KafkaOauthClientsBuilder(teamAOauthClientJob) - .withConsumerGroup("bad_consumer_group" + testStorage.getClusterName()) - .withTopicName(topicAName) - .build(); + teamAOauthClients.setConsumerGroup("bad_consumer_group" + testStorage.getClusterName()); + teamAOauthClients.setTopicName(topicAName); - KubeResourceManager.get().createResourceWithWait(teamAOauthClientJob.consumerStrimziOauthTls(oauthClusterName)); + KubeResourceManager.get().createResourceWithWait(teamAOauthClients.getConsumer().getJob()); JobUtils.waitForJobFailure(Environment.TEST_SUITE_NAMESPACE, teamAConsumerName, 30_000); - JobUtils.deleteJobWithWait(Environment.TEST_SUITE_NAMESPACE, teamAProducerName); + JobUtils.deleteJobWithWait(Environment.TEST_SUITE_NAMESPACE, teamAConsumerName); // team A client should be able to consume messages with correct consumer group - teamAOauthClientJob = new KafkaOauthClientsBuilder(teamAOauthClientJob) - .withConsumerGroup("a-correct_consumer_group" + testStorage.getClusterName()) - .withTopicName(topicAName) - .build(); + teamAOauthClients.setConsumerGroup("a-correct_consumer_group" + testStorage.getClusterName()); - KubeResourceManager.get().createResourceWithWait(teamAOauthClientJob.producerStrimziOauthTls(oauthClusterName)); - ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, teamAProducerName, testStorage.getMessageCount()); + 
KubeResourceManager.get().createResourceWithWait(teamAOauthClients.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, teamAConsumerName, testStorage.getMessageCount()); } /** @@ -230,7 +216,7 @@ void testTeamBWriteToTopic() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), oauthClusterName).build()); - KafkaOauthClients teamBOauthClientJob = new KafkaOauthClientsBuilder() + final KafkaProducerConsumer teamBOauthClients = new KafkaProducerConsumerBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) .withProducerName(teamBProducerName) .withConsumerName(teamBConsumerName) @@ -238,28 +224,26 @@ void testTeamBWriteToTopic() { .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) .withConsumerGroup(consumerGroup) - .withOauthClientId(TEAM_B_CLIENT) - .withOauthClientSecret(TEAM_B_CLIENT_SECRET) - .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) + .withAuthentication(ClientsAuthentication.configureTlsOAuth(oauthClusterName, TEAM_B_CLIENT, TEAM_B_CLIENT_SECRET, keycloakInstance.getOauthTokenEndpointUri())) // by default it's set to 1000, which makes the job longer to fail .withAdditionalConfig("retry.backoff.max.ms=100\n") .build(); LOGGER.info("Sending {} messages to Broker with Topic name {}", testStorage.getMessageCount(), testStorage.getTopicName()); // Producer will not produce messages because authorization topic will failed. 
Team A can write only to topic starting with 'x-' - KubeResourceManager.get().createResourceWithWait(teamBOauthClientJob.producerStrimziOauthTls(oauthClusterName)); + KubeResourceManager.get().createResourceWithWait(teamBOauthClients.getProducer().getJob()); JobUtils.waitForJobFailure(Environment.TEST_SUITE_NAMESPACE, teamBProducerName, 30_000); JobUtils.deleteJobWithWait(Environment.TEST_SUITE_NAMESPACE, teamBProducerName); LOGGER.info("Sending {} messages to Broker with Topic name {}", testStorage.getMessageCount(), TOPIC_B); - teamBOauthClientJob = new KafkaOauthClientsBuilder(teamBOauthClientJob) - .withConsumerGroup("x-consumer_group_b-" + testStorage.getClusterName()) - .withTopicName(TOPIC_B) - .build(); + teamBOauthClients.setConsumerGroup("x-consumer_group_b-" + testStorage.getClusterName()); + teamBOauthClients.setTopicName(TOPIC_B); - KubeResourceManager.get().createResourceWithWait(teamBOauthClientJob.producerStrimziOauthTls(oauthClusterName)); - KubeResourceManager.get().createResourceWithWait(teamBOauthClientJob.consumerStrimziOauthTls(oauthClusterName)); + KubeResourceManager.get().createResourceWithWait( + teamBOauthClients.getProducer().getJob(), + teamBOauthClients.getConsumer().getJob() + ); ClientUtils.waitForClientsSuccess(Environment.TEST_SUITE_NAMESPACE, teamBConsumerName, teamBProducerName, testStorage.getMessageCount()); } @@ -271,8 +255,6 @@ void testTeamBWriteToTopic() { void testTeamAWriteToTopicStartingWithXAndTeamBReadFromTopicStartingWithX() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); String teamAProducerName = TEAM_A_PRODUCER_NAME + "-" + testStorage.getClusterName(); - String teamAConsumerName = TEAM_A_CONSUMER_NAME + "-" + testStorage.getClusterName(); - String teamBProducerName = TEAM_B_PRODUCER_NAME + "-" + testStorage.getClusterName(); String teamBConsumerName = TEAM_B_CONSUMER_NAME + "-" + testStorage.getClusterName(); // only write means that Team A can not create new topic 
'x-.*' String topicXName = TOPIC_X + testStorage.getTopicName(); @@ -280,41 +262,29 @@ void testTeamAWriteToTopicStartingWithXAndTeamBReadFromTopicStartingWithX() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, topicXName, oauthClusterName).build()); - KafkaOauthClients teamAOauthClientJob = new KafkaOauthClientsBuilder() + final KafkaProducerClient teamAOauthProducer = new KafkaProducerClientBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) - .withProducerName(teamAProducerName) - .withConsumerName(teamAConsumerName) + .withName(teamAProducerName) .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(oauthClusterName)) .withTopicName(topicXName) .withMessageCount(testStorage.getMessageCount()) - .withConsumerGroup(consumerGroup) - .withOauthClientId(TEAM_A_CLIENT) - .withOauthClientSecret(TEAM_A_CLIENT_SECRET) - .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) + .withAuthentication(ClientsAuthentication.configureTlsOAuth(oauthClusterName, TEAM_A_CLIENT, TEAM_A_CLIENT_SECRET, keycloakInstance.getOauthTokenEndpointUri())) .build(); - teamAOauthClientJob = new KafkaOauthClientsBuilder(teamAOauthClientJob) - .withConsumerGroup("a-consumer_group" + testStorage.getClusterName()) - .withTopicName(topicXName) - .build(); - - KubeResourceManager.get().createResourceWithWait(teamAOauthClientJob.producerStrimziOauthTls(oauthClusterName)); + KubeResourceManager.get().createResourceWithWait(teamAOauthProducer.getJob()); ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, teamAProducerName, testStorage.getMessageCount()); - KafkaOauthClients teamBOauthClientJob = new KafkaOauthClientsBuilder() + final KafkaConsumerClient teamBOauthConsumer = new KafkaConsumerClientBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) - .withProducerName(teamBProducerName) - .withConsumerName(teamBConsumerName) + .withName(teamBConsumerName) 
.withBootstrapAddress(KafkaResources.tlsBootstrapAddress(oauthClusterName)) .withTopicName(topicXName) .withMessageCount(testStorage.getMessageCount()) - .withConsumerGroup("x-consumer_group_b-" + testStorage.getClusterName()) - .withOauthClientId(TEAM_B_CLIENT) - .withOauthClientSecret(TEAM_B_CLIENT_SECRET) - .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) + .withConsumerGroup(consumerGroup) + .withAuthentication(ClientsAuthentication.configureTlsOAuth(oauthClusterName, TEAM_B_CLIENT, TEAM_B_CLIENT_SECRET, keycloakInstance.getOauthTokenEndpointUri())) .build(); - KubeResourceManager.get().createResourceWithWait(teamBOauthClientJob.consumerStrimziOauthTls(oauthClusterName)); + KubeResourceManager.get().createResourceWithWait(teamBOauthConsumer.getJob()); ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, teamBConsumerName, testStorage.getMessageCount()); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthPlainST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthPlainST.java index ec090a246b9..07f14466b99 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthPlainST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthPlainST.java @@ -27,10 +27,7 @@ import io.strimzi.systemtest.annotations.FIPSNotSupported; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.ParallelTest; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClients; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeClientsBuilder; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaOauthClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaOauthClientsBuilder; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.labels.LabelSelectors; import io.strimzi.systemtest.metrics.KafkaBridgeMetricsComponent; import 
io.strimzi.systemtest.metrics.KafkaConnectMetricsComponent; @@ -54,6 +51,16 @@ import io.strimzi.systemtest.utils.specific.MetricsUtils; import io.strimzi.test.TestUtils; import io.strimzi.test.WaitException; +import io.strimzi.testclients.clients.http.HttpProducerClient; +import io.strimzi.testclients.clients.http.HttpProducerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClient; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerClient; +import io.strimzi.testclients.clients.kafka.KafkaProducerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; +import io.strimzi.testclients.configuration.Authentication; +import io.strimzi.testclients.configuration.AuthenticationBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -64,7 +71,6 @@ import java.util.List; import java.util.Map; -import static io.strimzi.systemtest.TestConstants.HTTP_BRIDGE_DEFAULT_PORT; import static io.strimzi.systemtest.TestTags.BRIDGE; import static io.strimzi.systemtest.TestTags.CONNECT; import static io.strimzi.systemtest.TestTags.CONNECT_COMPONENTS; @@ -115,23 +121,23 @@ void testProducerConsumerWithOauthMetrics() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), oauthClusterName).build()); - KafkaOauthClients oauthExampleClients = new KafkaOauthClientsBuilder() + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) .withProducerName(producerName) .withConsumerName(consumerName) .withBootstrapAddress(KafkaResources.plainBootstrapAddress(oauthClusterName)) .withTopicName(testStorage.getTopicName()) 
.withMessageCount(testStorage.getMessageCount()) - .withOauthClientId(OAUTH_CLIENT_NAME) - .withOauthClientSecret(OAUTH_CLIENT_SECRET) - .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withAuthentication(ClientsAuthentication.configureOAuthPlain(OAUTH_CLIENT_NAME, OAUTH_CLIENT_SECRET, keycloakInstance.getOauthTokenEndpointUri())) .build(); - KubeResourceManager.get().createResourceWithWait(oauthExampleClients.producerStrimziOauthPlain()); - ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, producerName, testStorage.getMessageCount()); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); - KubeResourceManager.get().createResourceWithWait(oauthExampleClients.consumerStrimziOauthPlain()); - ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, consumerName, testStorage.getMessageCount()); + ClientUtils.waitForClientsSuccess(Environment.TEST_SUITE_NAMESPACE, consumerName, producerName, testStorage.getMessageCount()); assertOauthMetricsForComponent( metricsCollector.toBuilder() @@ -146,38 +152,47 @@ void testSaslPlainProducerConsumer() { String audienceProducerName = OAUTH_CLIENT_AUDIENCE_PRODUCER + "-" + testStorage.getClusterName(); String audienceConsumerName = OAUTH_CLIENT_AUDIENCE_CONSUMER + "-" + testStorage.getClusterName(); - String plainAdditionalConfig = - "sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username=%s password=%s;\n" + - "sasl.mechanism=PLAIN"; + String saslJaasConfig = "sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username=%s password=%s"; + Authentication clientAuthentication = ClientsAuthentication.configureOAuthPlain(OAUTH_CLIENT_NAME, OAUTH_CLIENT_SECRET, keycloakInstance.getOauthTokenEndpointUri()); + clientAuthentication = new 
AuthenticationBuilder(clientAuthentication) + .withNewSasl() + .withSaslJaasConfig(String.format(saslJaasConfig, OAUTH_CLIENT_AUDIENCE_CONSUMER, OAUTH_CLIENT_AUDIENCE_SECRET)) + .withSaslMechanism("PLAIN") + .endSasl() + .build(); - KafkaOauthClients plainSaslOauthConsumerClientsJob = new KafkaOauthClientsBuilder() + final KafkaProducerClient plainSaslOauthProducer = new KafkaProducerClientBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) - .withConsumerName(audienceConsumerName) + .withName(audienceProducerName) .withBootstrapAddress(KafkaResources.plainBootstrapAddress(oauthClusterName)) .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) - .withOauthClientId(OAUTH_CLIENT_NAME) - .withOauthClientSecret(OAUTH_CLIENT_SECRET) - .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) - .withAdditionalConfig(String.format(plainAdditionalConfig, OAUTH_CLIENT_AUDIENCE_CONSUMER, OAUTH_CLIENT_AUDIENCE_SECRET)) + .withAuthentication(new AuthenticationBuilder(clientAuthentication) + .editSasl() + .withSaslJaasConfig(String.format(saslJaasConfig, OAUTH_CLIENT_AUDIENCE_PRODUCER, OAUTH_CLIENT_AUDIENCE_SECRET)) + .endSasl() + .build() + ) .build(); - KafkaOauthClients plainSaslOauthProducerClientsJob = new KafkaOauthClientsBuilder() + final KafkaConsumerClient plainSaslOauthConsumer = new KafkaConsumerClientBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) - .withProducerName(audienceProducerName) + .withName(audienceConsumerName) .withBootstrapAddress(KafkaResources.plainBootstrapAddress(oauthClusterName)) .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) - .withOauthClientId(OAUTH_CLIENT_NAME) - .withOauthClientSecret(OAUTH_CLIENT_SECRET) - .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) - .withAdditionalConfig(String.format(plainAdditionalConfig, OAUTH_CLIENT_AUDIENCE_PRODUCER, OAUTH_CLIENT_AUDIENCE_SECRET)) + 
.withAuthentication(new AuthenticationBuilder(clientAuthentication) + .editSasl() + .withSaslJaasConfig(String.format(saslJaasConfig, OAUTH_CLIENT_AUDIENCE_CONSUMER, OAUTH_CLIENT_AUDIENCE_SECRET)) + .endSasl() + .build() + ) .build(); - KubeResourceManager.get().createResourceWithWait(plainSaslOauthProducerClientsJob.producerStrimziOauthPlain()); + KubeResourceManager.get().createResourceWithWait(plainSaslOauthProducer.getJob()); ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, audienceProducerName, testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(plainSaslOauthConsumerClientsJob.consumerStrimziOauthPlain()); + KubeResourceManager.get().createResourceWithWait(plainSaslOauthConsumer.getJob()); ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, audienceConsumerName, testStorage.getMessageCount()); } @@ -193,24 +208,24 @@ void testProducerConsumerConnectWithOauthMetrics() { String producerName = OAUTH_PRODUCER_NAME + "-" + testStorage.getClusterName(); String consumerName = OAUTH_CONSUMER_NAME + "-" + testStorage.getClusterName(); - KafkaOauthClients oauthExampleClients = new KafkaOauthClientsBuilder() + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) .withProducerName(producerName) .withConsumerName(consumerName) .withBootstrapAddress(KafkaResources.plainBootstrapAddress(oauthClusterName)) .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) - .withOauthClientId(OAUTH_CLIENT_NAME) - .withOauthClientSecret(OAUTH_CLIENT_SECRET) - .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withAuthentication(ClientsAuthentication.configureOAuthPlain(OAUTH_CLIENT_NAME, OAUTH_CLIENT_SECRET, keycloakInstance.getOauthTokenEndpointUri())) .build(); 
KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), oauthClusterName).build()); - KubeResourceManager.get().createResourceWithWait(oauthExampleClients.producerStrimziOauthPlain()); - ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, producerName, testStorage.getMessageCount()); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); - KubeResourceManager.get().createResourceWithWait(oauthExampleClients.consumerStrimziOauthPlain()); - ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, consumerName, testStorage.getMessageCount()); + ClientUtils.waitForClientsSuccess(Environment.TEST_SUITE_NAMESPACE, consumerName, producerName, testStorage.getMessageCount()); String connectJassConfig = JAAS_CONFIG_BUILDER.apply("org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule", Map.of( "oauth.token.endpoint.uri", keycloakInstance.getOauthTokenEndpointUri(), @@ -284,24 +299,25 @@ void testProducerConsumerMirrorMaker2WithOauthMetrics() { String producerName = OAUTH_PRODUCER_NAME + "-" + testStorage.getClusterName(); String consumerName = OAUTH_CONSUMER_NAME + "-" + testStorage.getClusterName(); - KafkaOauthClients oauthExampleClients = new KafkaOauthClientsBuilder() + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) .withProducerName(producerName) .withConsumerName(consumerName) .withBootstrapAddress(KafkaResources.plainBootstrapAddress(oauthClusterName)) .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) - .withOauthClientId(OAUTH_CLIENT_NAME) - .withOauthClientSecret(OAUTH_CLIENT_SECRET) - .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + 
.withAuthentication(ClientsAuthentication.configureOAuthPlain(OAUTH_CLIENT_NAME, OAUTH_CLIENT_SECRET, keycloakInstance.getOauthTokenEndpointUri())) .build(); KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), oauthClusterName).build()); - KubeResourceManager.get().createResourceWithWait(oauthExampleClients.producerStrimziOauthPlain()); - ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, producerName, testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(oauthExampleClients.consumerStrimziOauthPlain()); - ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, consumerName, testStorage.getMessageCount()); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(Environment.TEST_SUITE_NAMESPACE, consumerName, producerName, testStorage.getMessageCount()); String kafkaSourceClusterName = oauthClusterName; @@ -436,23 +452,21 @@ void testProducerConsumerMirrorMaker2WithOauthMetrics() { LOGGER.info("Creating new client with new consumer-group and also to point on {} cluster", testStorage.getTargetClusterName()); - KafkaOauthClients kafkaOauthClientJob = new KafkaOauthClientsBuilder() + final KafkaConsumerClient oauthConsumer = new KafkaConsumerClientBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) - .withProducerName(producerName) - .withConsumerName(consumerName) + .withName(consumerName) .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())) .withTopicName(kafkaTargetClusterTopicName) .withMessageCount(testStorage.getMessageCount()) - .withOauthClientId(OAUTH_CLIENT_NAME) - .withOauthClientSecret(OAUTH_CLIENT_SECRET) - .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) + 
.withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withAuthentication(ClientsAuthentication.configureOAuthPlain(OAUTH_CLIENT_NAME, OAUTH_CLIENT_SECRET, keycloakInstance.getOauthTokenEndpointUri())) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaOauthClientJob.consumerStrimziOauthPlain()); + KubeResourceManager.get().createResourceWithWait(oauthConsumer.getJob()); try { ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, consumerName, testStorage.getMessageCount()); - return true; + return true; } catch (WaitException e) { e.printStackTrace(); return false; @@ -477,24 +491,25 @@ void testProducerConsumerBridgeWithOauthMetrics() { String producerName = OAUTH_PRODUCER_NAME + "-" + testStorage.getClusterName(); String consumerName = OAUTH_CONSUMER_NAME + "-" + testStorage.getClusterName(); - KafkaOauthClients oauthExampleClients = new KafkaOauthClientsBuilder() + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) .withProducerName(producerName) .withConsumerName(consumerName) .withBootstrapAddress(KafkaResources.plainBootstrapAddress(oauthClusterName)) .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) - .withOauthClientId(OAUTH_CLIENT_NAME) - .withOauthClientSecret(OAUTH_CLIENT_SECRET) - .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withAuthentication(ClientsAuthentication.configureOAuthPlain(OAUTH_CLIENT_NAME, OAUTH_CLIENT_SECRET, keycloakInstance.getOauthTokenEndpointUri())) .build(); KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), oauthClusterName).build()); - KubeResourceManager.get().createResourceWithWait(oauthExampleClients.producerStrimziOauthPlain()); - 
ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, producerName, testStorage.getMessageCount()); - KubeResourceManager.get().createResourceWithWait(oauthExampleClients.consumerStrimziOauthPlain()); - ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, consumerName, testStorage.getMessageCount()); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(Environment.TEST_SUITE_NAMESPACE, consumerName, producerName, testStorage.getMessageCount()); // needed for a verification of oauth configuration InlineLogging ilDebug = new InlineLogging(); @@ -541,19 +556,17 @@ void testProducerConsumerBridgeWithOauthMetrics() { String bridgeProducerName = "bridge-producer-" + testStorage.getClusterName(); - BridgeClients kafkaBridgeClientJob = new BridgeClientsBuilder() + HttpProducerClient httpProducer = new HttpProducerClientBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) - .withProducerName(bridgeProducerName) - .withBootstrapAddress(KafkaBridgeResources.serviceName(oauthClusterName)) - .withComponentName(KafkaBridgeResources.componentName(oauthClusterName)) + .withName(bridgeProducerName) + .withHostname(KafkaBridgeResources.serviceName(oauthClusterName)) .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) - .withPort(HTTP_BRIDGE_DEFAULT_PORT) + .withPort(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) .withDelayMs(1000) - .withPollInterval(1000) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJob.producerStrimziBridge()); + KubeResourceManager.get().createResourceWithWait(httpProducer.getJob()); ClientUtils.waitForClientSuccess(Environment.TEST_SUITE_NAMESPACE, bridgeProducerName, testStorage.getMessageCount()); assertOauthMetricsForComponent( diff --git a/systemtest/src/test/java/io/strimzi/systemtest/specific/AccessOperatorST.java 
b/systemtest/src/test/java/io/strimzi/systemtest/specific/AccessOperatorST.java index 60c7c0a2d51..67bfb894ec0 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/specific/AccessOperatorST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/specific/AccessOperatorST.java @@ -21,8 +21,6 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.docs.TestDocsLabels; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; import io.strimzi.systemtest.resources.access.SetupAccessOperator; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; @@ -32,6 +30,8 @@ import io.strimzi.systemtest.templates.crd.KafkaUserTemplates; import io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.SecretUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -166,23 +166,25 @@ void testAccessOperator() { .build() ); - final KafkaClients kafkaClients = new KafkaClientsBuilder() + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() .withBootstrapAddress(bootstrapServer) .withNamespaceName(testStorage.getNamespaceName()) .withProducerName(testStorage.getProducerName()) .withConsumerName(testStorage.getConsumerName()) .withMessageCount(TestConstants.MESSAGE_COUNT) .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withAdditionalEnvVars(tlsEnvVarsForKafkaAccess) .build(); LOGGER.info("Deploying Kafka clients with configured TLS using the KafkaAccess' secret"); 
KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerTlsStrimziWithTlsEnvVars(tlsEnvVarsForKafkaAccess), - kafkaClients.consumerTlsStrimziWithTlsEnvVars(tlsEnvVarsForKafkaAccess) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); LOGGER.info("Verifying successful message transmission"); - ClientUtils.waitForInstantClientSuccess(testStorage); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @BeforeAll diff --git a/systemtest/src/test/java/io/strimzi/systemtest/specific/DrainCleanerST.java b/systemtest/src/test/java/io/strimzi/systemtest/specific/DrainCleanerST.java index e62a7fac9d5..58a644dcdb3 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/specific/DrainCleanerST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/specific/DrainCleanerST.java @@ -6,11 +6,11 @@ import io.fabric8.kubernetes.client.KubernetesClientException; import io.skodjob.kubetest4j.resources.KubeResourceManager; +import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.MicroShiftNotSupported; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.draincleaner.SetupDrainCleaner; import io.strimzi.systemtest.resources.operator.ClusterOperatorConfigurationBuilder; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -22,6 +22,8 @@ import io.strimzi.systemtest.utils.RollingUpdateUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import 
io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -59,13 +61,21 @@ void testDrainCleanerWithComponents() { // allow NetworkPolicies for the webhook in case that we have "default to deny all" mode enabled NetworkPolicyUtils.allowNetworkPolicySettingsForWebhook(TestConstants.DRAIN_CLEANER_NAMESPACE, TestConstants.DRAIN_CLEANER_DEPLOYMENT_NAME, Map.of(TestConstants.APP_POD_LABEL, TestConstants.DRAIN_CLEANER_DEPLOYMENT_NAME)); - final KafkaClients continuousClients = ClientUtils.getContinuousPlainClientBuilder(testStorage) + final KafkaProducerConsumer continuousKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getContinuousProducerName()) + .withConsumerName(testStorage.getContinuousConsumerName()) .withNamespaceName(TestConstants.DRAIN_CLEANER_NAMESPACE) + .withTopicName(testStorage.getContinuousTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getContinuousMessageCount()) + .withDelayMs(1000) .build(); KubeResourceManager.get().createResourceWithWait( - continuousClients.producerStrimzi(), - continuousClients.consumerStrimzi()); + continuousKafkaProducerConsumer.getProducer().getJob(), + continuousKafkaProducerConsumer.getConsumer().getJob() + ); List brokerPods = PodUtils.listPodNames(TestConstants.DRAIN_CLEANER_NAMESPACE, testStorage.getBrokerSelector()); @@ -77,7 +87,7 @@ void testDrainCleanerWithComponents() { evictPodWithName(kafkaPodName); RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TestConstants.DRAIN_CLEANER_NAMESPACE, testStorage.getBrokerSelector(), replicas, kafkaPod); - ClientUtils.waitForClientsSuccess(TestConstants.DRAIN_CLEANER_NAMESPACE, testStorage.getContinuousConsumerName(), 
testStorage.getContinuousProducerName(), 200); + ClientUtils.waitForClientsSuccess(TestConstants.DRAIN_CLEANER_NAMESPACE, testStorage.getContinuousConsumerName(), testStorage.getContinuousProducerName(), testStorage.getContinuousMessageCount()); } private void evictPodWithName(String podName) { diff --git a/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java b/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java index c676a815bcd..c6205a41ce2 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java @@ -18,7 +18,6 @@ import io.strimzi.systemtest.Environment; import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.kafkaclients.internalClients.admin.AdminClient; import io.strimzi.systemtest.labels.LabelSelectors; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -36,6 +35,8 @@ import io.strimzi.systemtest.utils.kafkaUtils.KafkaConnectUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.StrimziPodSetUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.Assumptions; @@ -118,9 +119,22 @@ void testKafkaRackAwareness() { assertThat(AdminClientUtils.getRack(nodeDescription, "0").contains(hostname), is(true)); LOGGER.info("Producing and Consuming data in the Kafka cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getClusterName()); - KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage); - 
KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } /** @@ -204,9 +218,22 @@ void testConnectRackAwareness() { assertThat(commandOut.contains("consumer.client.rack=${strimzidir:/opt/kafka/init:rack.id}"), is(true)); // produce data which are to be available in the topic - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + 
KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); consumeDataWithNewSinkConnector(testStorage.getNamespaceName(), testStorage.getClusterName(), testStorage.getClusterName(), testStorage.getTopicName(), testStorage.getMessageCount()); } @@ -283,16 +310,32 @@ void testMirrorMaker2RackAwareness() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getSourceClusterName(), 3).build()); LOGGER.info("Producing messages into the source Kafka: {}/{}, Topic: {}", testStorage.getNamespaceName(), testStorage.getSourceClusterName(), testStorage.getTopicName()); - final KafkaClients sourceClients = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getSourceClusterName())); - KubeResourceManager.get().createResourceWithWait(sourceClients.producerStrimzi()); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + final KafkaProducerConsumer sourceKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getSourceClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + sourceKafkaProducerConsumer.getProducer().getJob(), + sourceKafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), 
testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); LOGGER.info("Consuming messages in the target Kafka: {}/{} mirrored Topic: {}", testStorage.getNamespaceName(), testStorage.getTargetClusterName(), testStorage.getMirroredSourceTopicName()); - final KafkaClients targetClients = ClientUtils.getInstantPlainClientBuilder(testStorage, KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())) + final KafkaProducerConsumer targetKafkaProducerConsumer = new KafkaProducerConsumerBuilder(sourceKafkaProducerConsumer) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())) .withTopicName(testStorage.getMirroredSourceTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) .build(); - KubeResourceManager.get().createResourceWithWait(targetClients.consumerStrimzi()); - ClientUtils.waitForInstantConsumerClientSuccess(testStorage); + + KubeResourceManager.get().createResourceWithWait(targetKafkaProducerConsumer.getConsumer().getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); } private void consumeDataWithNewSinkConnector(String namespaceName, String newConnectorName, String connectClusterName, String topicName, int msgCount) { diff --git a/systemtest/src/test/java/io/strimzi/systemtest/tracing/OpenTelemetryST.java b/systemtest/src/test/java/io/strimzi/systemtest/tracing/OpenTelemetryST.java index 80505c2eefe..20532feec5f 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/tracing/OpenTelemetryST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/tracing/OpenTelemetryST.java @@ -4,10 +4,9 @@ */ package io.strimzi.systemtest.tracing; +import io.fabric8.kubernetes.api.model.EnvVarBuilder; import io.skodjob.kubetest4j.resources.KubeResourceManager; import io.strimzi.api.kafka.model.bridge.KafkaBridgeResources; -import 
io.strimzi.api.kafka.model.common.tracing.OpenTelemetryTracing; -import io.strimzi.api.kafka.model.common.tracing.Tracing; import io.strimzi.api.kafka.model.connect.KafkaConnectBuilder; import io.strimzi.api.kafka.model.connect.KafkaConnectResources; import io.strimzi.api.kafka.model.kafka.KafkaResources; @@ -16,10 +15,6 @@ import io.strimzi.systemtest.Environment; import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeTracingClients; -import io.strimzi.systemtest.kafkaclients.internalClients.BridgeTracingClientsBuilder; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaTracingClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaTracingClientsBuilder; import io.strimzi.systemtest.resources.jaeger.SetupOpenTelemetry; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; import io.strimzi.systemtest.storage.TestStorage; @@ -33,16 +28,24 @@ import io.strimzi.systemtest.templates.specific.ScraperTemplates; import io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.specific.TracingUtils; +import io.strimzi.testclients.clients.http.HttpProducerClient; +import io.strimzi.testclients.clients.http.HttpProducerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClient; +import io.strimzi.testclients.clients.kafka.KafkaConsumerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerClient; +import io.strimzi.testclients.clients.kafka.KafkaProducerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaStreamsClient; +import io.strimzi.testclients.clients.kafka.KafkaStreamsClientBuilder; +import io.strimzi.testclients.configuration.Tracing; +import io.strimzi.testclients.configuration.TracingBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; import 
org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.extension.ExtensionContext; import java.util.HashMap; import java.util.Map; -import static io.strimzi.systemtest.TestConstants.KAFKA_TRACING_CLIENT_KEY; import static io.strimzi.systemtest.TestTags.BRIDGE; import static io.strimzi.systemtest.TestTags.CONNECT; import static io.strimzi.systemtest.TestTags.CONNECT_COMPONENTS; @@ -65,11 +68,10 @@ public class OpenTelemetryST extends AbstractST { private static final Logger LOGGER = LogManager.getLogger(OpenTelemetryST.class); - private final Tracing otelTracing = new OpenTelemetryTracing(); - @ParallelNamespaceTest void testProducerConsumerStreamsService() { final TestStorage testStorage = deployInitialResourcesAndGetTestStorage(); + final String kafkaStreamsName = testStorage.getClusterName() + "-streams"; KubeResourceManager.get().createResourceWithWait( KafkaNodePoolTemplates.brokerPool(testStorage.getNamespaceName(), testStorage.getBrokerPoolName(), testStorage.getClusterName(), 3).build(), @@ -81,21 +83,50 @@ void testProducerConsumerStreamsService() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getStreamsTopicTargetName(), testStorage.getClusterName(), 12, 3).build()); - KubeResourceManager.get().createResourceWithWait((testStorage.getTracingClients()).producerWithTracing()); + final KafkaProducerClient tracingKafkaProducer = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withTracing(tracingConfiguration(JAEGER_PRODUCER_SERVICE)) + .build(); + + KubeResourceManager.get().createResourceWithWait(tracingKafkaProducer.getJob()); TracingUtils.verify(testStorage.getNamespaceName(), JAEGER_PRODUCER_SERVICE, 
testStorage.getScraperPodName(), JAEGER_QUERY_SERVICE); - KubeResourceManager.get().createResourceWithWait((testStorage.getTracingClients()).consumerWithTracing()); + final KafkaConsumerClient tracingKafkaConsumer = new KafkaConsumerClientBuilder() + .withName(testStorage.getConsumerName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withTracing(tracingConfiguration(JAEGER_CONSUMER_SERVICE)) + .build(); + + KubeResourceManager.get().createResourceWithWait(tracingKafkaConsumer.getJob()); TracingUtils.verify(testStorage.getNamespaceName(), JAEGER_CONSUMER_SERVICE, testStorage.getScraperPodName(), JAEGER_QUERY_SERVICE); - KubeResourceManager.get().createResourceWithWait((testStorage.getTracingClients()).kafkaStreamsWithTracing()); + final KafkaStreamsClient tracingKafkaStreams = new KafkaStreamsClientBuilder() + .withName(kafkaStreamsName) + .withNamespaceName(testStorage.getNamespaceName()) + .withSourceTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withTracing(tracingConfiguration(JAEGER_KAFKA_STREAMS_SERVICE)) + .withTargetTopicName(testStorage.getStreamsTopicTargetName()) + .withApplicationId(kafkaStreamsName) + .build(); + + KubeResourceManager.get().createResourceWithWait(tracingKafkaStreams.getJob()); TracingUtils.verify(testStorage.getNamespaceName(), JAEGER_KAFKA_STREAMS_SERVICE, @@ -127,25 +158,35 @@ void testProducerConsumerMirrorMaker2Service() { LOGGER.info("Setting for Kafka source plain bootstrap: {}", KafkaResources.plainBootstrapAddress(testStorage.getSourceClusterName())); - KafkaTracingClients sourceKafkaTracingClient = new KafkaTracingClientsBuilder(testStorage.getTracingClients()) + final 
KafkaProducerClient tracingKafkaProducer = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + .withNamespaceName(testStorage.getNamespaceName()) .withTopicName(testStorage.getTopicName()) .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getSourceClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withTracing(tracingConfiguration(JAEGER_PRODUCER_SERVICE)) .build(); - KubeResourceManager.get().createResourceWithWait(sourceKafkaTracingClient.producerWithTracing()); + KubeResourceManager.get().createResourceWithWait(tracingKafkaProducer.getJob()); LOGGER.info("Setting for Kafka target plain bootstrap: {}", KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())); - final KafkaTracingClients targetKafkaTracingClient = new KafkaTracingClientsBuilder(testStorage.getTracingClients()) - .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())) + final KafkaConsumerClient tracingKafkaConsumer = new KafkaConsumerClientBuilder() + .withName(testStorage.getConsumerName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) .withTopicName(testStorage.getMirroredSourceTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getTargetClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withTracing(tracingConfiguration(JAEGER_CONSUMER_SERVICE)) .build(); - KubeResourceManager.get().createResourceWithWait(targetKafkaTracingClient.consumerWithTracing()); + KubeResourceManager.get().createResourceWithWait(tracingKafkaConsumer.getJob()); KubeResourceManager.get().createResourceWithWait(KafkaMirrorMaker2Templates.kafkaMirrorMaker2(testStorage.getNamespaceName(), testStorage.getClusterName(), testStorage.getSourceClusterName(), testStorage.getTargetClusterName(), 1, false) .editSpec() - .withTracing(otelTracing) + .withNewOpenTelemetryTracing() + 
.endOpenTelemetryTracing() .withNewTemplate() .withNewConnectContainer() .addNewEnv() @@ -184,6 +225,7 @@ void testProducerConsumerMirrorMaker2Service() { @SuppressWarnings({"checkstyle:MethodLength"}) void testProducerConsumerStreamsConnectService() { final TestStorage testStorage = deployInitialResourcesAndGetTestStorage(); + final String kafkaStreamsName = testStorage.getClusterName() + "-streams"; KubeResourceManager.get().createResourceWithWait( KafkaNodePoolTemplates.brokerPool(testStorage.getNamespaceName(), testStorage.getBrokerPoolName(), testStorage.getClusterName(), 3).build(), @@ -214,7 +256,8 @@ void testProducerConsumerStreamsConnectService() { .withOffsetStorageTopic(KafkaConnectResources.configStorageTopicOffsets(testStorage.getClusterName())) .withStatusStorageTopic(KafkaConnectResources.configStorageTopicStatus(testStorage.getClusterName())) .withConfig(configOfKafkaConnect) - .withTracing(otelTracing) + .withNewOpenTelemetryTracing() + .endOpenTelemetryTracing() .withBootstrapServers(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) .withReplicas(1) .withNewTemplate() @@ -243,9 +286,40 @@ void testProducerConsumerStreamsConnectService() { .endSpec() .build()); - KubeResourceManager.get().createResourceWithWait(((KafkaTracingClients) KubeResourceManager.get().getTestContext().getStore(ExtensionContext.Namespace.GLOBAL).get(KAFKA_TRACING_CLIENT_KEY)).producerWithTracing()); - KubeResourceManager.get().createResourceWithWait(((KafkaTracingClients) KubeResourceManager.get().getTestContext().getStore(ExtensionContext.Namespace.GLOBAL).get(KAFKA_TRACING_CLIENT_KEY)).consumerWithTracing()); - KubeResourceManager.get().createResourceWithWait(((KafkaTracingClients) KubeResourceManager.get().getTestContext().getStore(ExtensionContext.Namespace.GLOBAL).get(KAFKA_TRACING_CLIENT_KEY)).kafkaStreamsWithTracing()); + final KafkaProducerClient tracingKafkaProducer = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + 
.withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withTracing(tracingConfiguration(JAEGER_PRODUCER_SERVICE)) + .build(); + + KubeResourceManager.get().createResourceWithWait(tracingKafkaProducer.getJob()); + + final KafkaConsumerClient tracingKafkaConsumer = new KafkaConsumerClientBuilder() + .withName(testStorage.getConsumerName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withTracing(tracingConfiguration(JAEGER_CONSUMER_SERVICE)) + .build(); + + KubeResourceManager.get().createResourceWithWait(tracingKafkaConsumer.getJob()); + + final KafkaStreamsClient tracingKafkaStreams = new KafkaStreamsClientBuilder() + .withName(kafkaStreamsName) + .withNamespaceName(testStorage.getNamespaceName()) + .withSourceTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withTracing(tracingConfiguration(JAEGER_KAFKA_STREAMS_SERVICE)) + .withTargetTopicName(testStorage.getStreamsTopicTargetName()) + .withApplicationId(kafkaStreamsName) + .build(); + + KubeResourceManager.get().createResourceWithWait(tracingKafkaStreams.getJob()); ClientUtils.waitForClientsSuccess( testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), @@ -271,7 +345,8 @@ void testKafkaBridgeService() { // Deploy http bridge KubeResourceManager.get().createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(testStorage.getNamespaceName(), testStorage.getClusterName(), KafkaResources.plainBootstrapAddress(testStorage.getClusterName()), 1) .editSpec() - 
.withTracing(otelTracing) + .withNewOpenTelemetryTracing() + .endOpenTelemetryTracing() .withNewTemplate() .withNewBridgeContainer() .addNewEnv() @@ -294,21 +369,32 @@ void testKafkaBridgeService() { final String bridgeProducer = "bridge-producer"; KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName()).build()); - final BridgeTracingClients kafkaBridgeClientJob = new BridgeTracingClientsBuilder() - .withTracingServiceNameEnvVar(TracingConstants.OTEL_SERVICE_ENV) - .withProducerName(bridgeProducer) + HttpProducerClient httpProducer = new HttpProducerClientBuilder() .withNamespaceName(testStorage.getNamespaceName()) - .withBootstrapAddress(KafkaBridgeResources.serviceName(testStorage.getClusterName())) + .withName(bridgeProducer) + .withHostname(KafkaBridgeResources.serviceName(testStorage.getClusterName())) .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) .withPort(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) .withDelayMs(1000) - .withPollInterval(1000) - .withOpenTelemetry() + .withTracing(tracingConfiguration(bridgeProducer)) + .addToAdditionalEnvVars() .build(); - KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJob.producerStrimziBridgeWithTracing()); - KubeResourceManager.get().createResourceWithWait((testStorage.getTracingClients()).consumerWithTracing()); + KubeResourceManager.get().createResourceWithWait(httpProducer.getJob()); + + final KafkaConsumerClient tracingKafkaConsumer = new KafkaConsumerClientBuilder() + .withName(testStorage.getConsumerName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + 
.withTracing(tracingConfiguration(JAEGER_CONSUMER_SERVICE)) + .build(); + + KubeResourceManager.get().createResourceWithWait(tracingKafkaConsumer.getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), bridgeProducer, testStorage.getMessageCount()); TracingUtils.verify(testStorage.getNamespaceName(), JAEGER_KAFKA_BRIDGE_SERVICE, testStorage.getScraperPodName(), JAEGER_QUERY_SERVICE); @@ -327,7 +413,8 @@ void testKafkaBridgeServiceWithHttpTracing() { // Deploy http bridge KubeResourceManager.get().createResourceWithWait(KafkaBridgeTemplates.kafkaBridge(testStorage.getNamespaceName(), testStorage.getClusterName(), KafkaResources.plainBootstrapAddress(testStorage.getClusterName()), 1) .editSpec() - .withTracing(otelTracing) + .withNewOpenTelemetryTracing() + .endOpenTelemetryTracing() .withNewTemplate() .withNewBridgeContainer() .addNewEnv() @@ -350,21 +437,32 @@ void testKafkaBridgeServiceWithHttpTracing() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName()).build()); final String bridgeProducer = "bridge-producer"; - final BridgeTracingClients kafkaBridgeClientJob = new BridgeTracingClientsBuilder() - .withTracingServiceNameEnvVar(TracingConstants.OTEL_SERVICE_ENV) - .withProducerName(bridgeProducer) + HttpProducerClient httpProducer = new HttpProducerClientBuilder() .withNamespaceName(testStorage.getNamespaceName()) - .withBootstrapAddress(KafkaBridgeResources.serviceName(testStorage.getClusterName())) + .withName(bridgeProducer) + .withHostname(KafkaBridgeResources.serviceName(testStorage.getClusterName())) .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) .withPort(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) .withDelayMs(1000) - .withPollInterval(1000) - .withOpenTelemetry() + .withTracing(tracingConfiguration(bridgeProducer)) + .addToAdditionalEnvVars() + .build(); + + 
KubeResourceManager.get().createResourceWithWait(httpProducer.getJob()); + + final KafkaConsumerClient tracingKafkaConsumer = new KafkaConsumerClientBuilder() + .withName(testStorage.getConsumerName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .withTracing(tracingConfiguration(JAEGER_CONSUMER_SERVICE)) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaBridgeClientJob.producerStrimziBridgeWithTracing()); - KubeResourceManager.get().createResourceWithWait((testStorage.getTracingClients()).consumerWithTracing()); + KubeResourceManager.get().createResourceWithWait(tracingKafkaConsumer.getJob()); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), bridgeProducer, testStorage.getMessageCount()); TracingUtils.verify(testStorage.getNamespaceName(), JAEGER_KAFKA_BRIDGE_SERVICE, testStorage.getScraperPodName(), JAEGER_QUERY_SERVICE); @@ -379,28 +477,22 @@ private TestStorage deployInitialResourcesAndGetTestStorage() { KubeResourceManager.get().createResourceWithWait(ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build()); testStorage.addToTestStorage(TestConstants.SCRAPER_POD_KEY, KubeResourceManager.get().kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), testStorage.getScraperName()).get(0).getMetadata().getName()); - final KafkaTracingClients kafkaTracingClients = new KafkaTracingClientsBuilder() - .withNamespaceName(testStorage.getNamespaceName()) - .withProducerName(testStorage.getProducerName()) - .withConsumerName(testStorage.getConsumerName()) - .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) - .withTopicName(testStorage.getTopicName()) - 
.withStreamsTopicTargetName(testStorage.getStreamsTopicTargetName()) - .withMessageCount(testStorage.getMessageCount()) - .withJaegerServiceProducerName(JAEGER_PRODUCER_SERVICE) - .withJaegerServiceConsumerName(JAEGER_CONSUMER_SERVICE) - .withJaegerServiceStreamsName(JAEGER_KAFKA_STREAMS_SERVICE) - .withTracingServiceNameEnvVar(TracingConstants.OTEL_SERVICE_ENV) - .withOpenTelemetry() - .build(); - - LOGGER.info("{}:\n", kafkaTracingClients.toString()); - - testStorage.addToTestStorage(TestConstants.KAFKA_TRACING_CLIENT_KEY, kafkaTracingClients); - return testStorage; } + private Tracing tracingConfiguration(String serviceName) { + return new TracingBuilder() + .withServiceNameEnvVar(TracingConstants.OTEL_SERVICE_ENV) + .withServiceName(serviceName) + .withTracingType(TracingConstants.OPEN_TELEMETRY) + .withAdditionalTracingEnvVars(new EnvVarBuilder() + .withName("OTEL_EXPORTER_OTLP_ENDPOINT") + .withValue(JAEGER_COLLECTOR_OTLP_URL) + .build() + ) + .build(); + } + @BeforeAll void setup() { assumeFalse(Environment.isNamespaceRbacScope()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractKRaftUpgradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractKRaftUpgradeST.java index ea959fe0ab6..0c5ed1e0025 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractKRaftUpgradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/AbstractKRaftUpgradeST.java @@ -36,7 +36,7 @@ import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.Environment; import io.strimzi.systemtest.TestConstants; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.labels.LabelSelectors; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.operator.ClusterOperatorConfiguration; @@ -64,6 +64,10 @@ import io.strimzi.test.ReadWriteUtils; import 
io.strimzi.test.TestUtils; import io.strimzi.test.k8s.KubeClusterResource; +import io.strimzi.testclients.clients.kafka.KafkaProducerClient; +import io.strimzi.testclients.clients.kafka.KafkaProducerClientBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -189,14 +193,20 @@ protected void doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure( */ private void verifyPostUpgradeOrDowngradeProcedure(final TestStorage testStorage, final BundleVersionModificationData upgradeDowngradeData) { - final KafkaClients clients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(CLUSTER_NAME)) - .withUsername(USER_NAME) + final KafkaProducerClient kafkaProducer = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(CLUSTER_NAME)) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(CLUSTER_NAME, USER_NAME)) .build(); + // send again new messages - KubeResourceManager.get().createResourceWithWait(clients.producerTlsStrimzi(CLUSTER_NAME)); + KubeResourceManager.get().createResourceWithWait(kafkaProducer.getJob()); // Verify that Producer finish successfully - ClientUtils.waitForInstantProducerClientSuccess(testStorage.getNamespaceName(), testStorage); + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); // Verify FileSink KafkaConnector verifyKafkaConnectorFileSink(testStorage); @@ -251,13 +261,18 @@ private void maybeWaitForRollingUpdate(final String namespaceName, * @param testStorage Test-related configuration and storage */ private void 
produceMessagesAndVerify(TestStorage testStorage) { - final KafkaClients clients = ClientUtils.getInstantTlsClientBuilder(testStorage, KafkaResources.tlsBootstrapAddress(CLUSTER_NAME)) + final KafkaProducerClient kafkaProducer = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) .withNamespaceName(testStorage.getNamespaceName()) - .withUsername(USER_NAME) + .withTopicName(testStorage.getTopicName()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(CLUSTER_NAME)) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(CLUSTER_NAME, USER_NAME)) .build(); - KubeResourceManager.get().createResourceWithWait(clients.producerTlsStrimzi(CLUSTER_NAME)); - ClientUtils.waitForInstantProducerClientSuccess(testStorage); + KubeResourceManager.get().createResourceWithWait(kafkaProducer.getJob()); + + ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); } protected void setupEnvAndUpgradeClusterOperator(String clusterOperatorNamespaceName, TestStorage testStorage, BundleVersionModificationData upgradeData, UpgradeKafkaVersion upgradeKafkaVersion) throws IOException { @@ -312,16 +327,21 @@ protected void setupEnvAndUpgradeClusterOperator(String clusterOperatorNamespace // 40s is used within TF environment to make upgrade/downgrade more stable on slow env String producerAdditionConfiguration = "delivery.timeout.ms=40000\nrequest.timeout.ms=5000"; - KafkaClients kafkaBasicClientJob = ClientUtils.getContinuousPlainClientBuilder(testStorage) + final KafkaProducerConsumer continuousKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getContinuousProducerName()) + .withConsumerName(testStorage.getContinuousConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getContinuousTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) 
.withBootstrapAddress(KafkaResources.plainBootstrapAddress(CLUSTER_NAME)) - .withMessageCount(upgradeData.getContinuousClientsMessages()) + .withMessageCount(testStorage.getContinuousMessageCount()) + .withDelayMs(1000) .withAdditionalConfig(producerAdditionConfiguration) - .withNamespaceName(testStorage.getNamespaceName()) .build(); KubeResourceManager.get().createResourceWithWait( - kafkaBasicClientJob.producerStrimzi(), - kafkaBasicClientJob.consumerStrimzi() + continuousKafkaProducerConsumer.getProducer().getJob(), + continuousKafkaProducerConsumer.getConsumer().getJob() ); // ############################## } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/KRaftKafkaUpgradeDowngradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/KRaftKafkaUpgradeDowngradeST.java index caf39506761..0aa437f1ea8 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/KRaftKafkaUpgradeDowngradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/KRaftKafkaUpgradeDowngradeST.java @@ -9,7 +9,6 @@ import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.IsolatedTest; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.crd.KafkaComponents; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -23,6 +22,8 @@ import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.test.TestUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.BeforeAll; @@ -158,15 +159,22 @@ void runVersionChange(TestStorage testStorage, TestKafkaVersion 
initialVersion, // 40s is used within TF environment to make upgrade/downgrade more stable on slow env String producerAdditionConfiguration = "delivery.timeout.ms=300000\nrequest.timeout.ms=20000"; - KafkaClients kafkaBasicClientJob = ClientUtils.getContinuousPlainClientBuilder(testStorage) + final KafkaProducerConsumer continuousKafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getContinuousProducerName()) + .withConsumerName(testStorage.getContinuousConsumerName()) .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getContinuousTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) .withBootstrapAddress(KafkaResources.plainBootstrapAddress(CLUSTER_NAME)) .withMessageCount(continuousClientsMessageCount) .withAdditionalConfig(producerAdditionConfiguration) + .withDelayMs(1000) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaBasicClientJob.producerStrimzi()); - KubeResourceManager.get().createResourceWithWait(kafkaBasicClientJob.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait( + continuousKafkaProducerConsumer.getProducer().getJob(), + continuousKafkaProducerConsumer.getConsumer().getJob() + ); // ############################## } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/KRaftOlmUpgradeST.java b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/KRaftOlmUpgradeST.java index 6c298b2b47e..dcb56172f8c 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/upgrade/KRaftOlmUpgradeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/upgrade/KRaftOlmUpgradeST.java @@ -13,8 +13,6 @@ import io.strimzi.systemtest.Environment; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.enums.OlmInstallationStrategy; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder; import 
io.strimzi.systemtest.resources.operator.ClusterOperatorConfiguration; import io.strimzi.systemtest.resources.operator.ClusterOperatorConfigurationBuilder; import io.strimzi.systemtest.resources.operator.SetupClusterOperator; @@ -26,6 +24,8 @@ import io.strimzi.systemtest.utils.RollingUpdateUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils; import io.strimzi.systemtest.utils.specific.OlmUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.Tag; @@ -107,7 +107,7 @@ void testStrimziUpgrade() throws IOException { KubeResourceManager.get().createResourceWithWait(kafkaUpgradeTopic); - KafkaClients kafkaBasicClientJob = new KafkaClientsBuilder() + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() .withProducerName(testStorage.getProducerName()) .withConsumerName(testStorage.getConsumerName()) .withNamespaceName(CO_NAMESPACE) @@ -115,9 +115,13 @@ void testStrimziUpgrade() throws IOException { .withTopicName(topicUpgradeName) .withMessageCount(testStorage.getMessageCount()) .withDelayMs(1000) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) .build(); - KubeResourceManager.get().createResourceWithWait(kafkaBasicClientJob.producerStrimzi(), kafkaBasicClientJob.consumerStrimzi()); + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); clusterOperatorConfiguration.setOperatorDeploymentName(KubeResourceManager.get().kubeClient().getDeploymentNameByPrefix(CO_NAMESPACE, Environment.OLM_OPERATOR_DEPLOYMENT_NAME)); LOGGER.info("Old deployment name of Cluster Operator is {}", clusterOperatorConfiguration.getOperatorDeploymentName()); diff --git 
a/systemtest/src/test/java/io/strimzi/systemtest/watcher/AbstractNamespaceST.java b/systemtest/src/test/java/io/strimzi/systemtest/watcher/AbstractNamespaceST.java index 64df1ed0875..d34d7f1b7b1 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/watcher/AbstractNamespaceST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/watcher/AbstractNamespaceST.java @@ -17,7 +17,7 @@ import io.strimzi.systemtest.TestConstants; import io.strimzi.systemtest.annotations.ParallelTest; import io.strimzi.systemtest.cli.KafkaCmdClient; -import io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients; +import io.strimzi.systemtest.kafkaclients.ClientsAuthentication; import io.strimzi.systemtest.labels.LabelSelectors; import io.strimzi.systemtest.resources.CrdClients; import io.strimzi.systemtest.resources.crd.KafkaComponents; @@ -34,6 +34,8 @@ import io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaConnectUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaConnectorUtils; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; +import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.Tag; @@ -187,11 +189,22 @@ final void testUserInNamespaceDifferentFromUserOperator(ExtensionContext extensi } LOGGER.info("Verifying KafkaUser: {}/{} by using its credentials to communicate with Kafka", PRIMARY_KAFKA_WATCHED_NAMESPACE, testStorage.getUsername()); - final KafkaClients kafkaClients = ClientUtils.getInstantTlsClients(testStorage, KafkaResources.tlsBootstrapAddress(PRIMARY_KAFKA_NAME)); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + 
.withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(PRIMARY_KAFKA_NAME)) + .withMessageCount(testStorage.getMessageCount()) + .withAuthentication(ClientsAuthentication.configureTls(PRIMARY_KAFKA_NAME, testStorage.getUsername())) + .build(); + KubeResourceManager.get().createResourceWithWait( - kafkaClients.producerTlsStrimzi(PRIMARY_KAFKA_NAME), - kafkaClients.consumerTlsStrimzi(PRIMARY_KAFKA_NAME) + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() ); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @@ -303,9 +316,22 @@ private void deployKafkaConnectorWithSink(ExtensionContext extensionContext, Str LOGGER.info("KafkaConnect Pod: {}/{}", testStorage.getNamespaceName(), kafkaConnectPodName); KafkaConnectUtils.waitUntilKafkaConnectRestApiIsAvailable(testStorage.getNamespaceName(), kafkaConnectPodName); - final KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage, KafkaResources.plainBootstrapAddress(PRIMARY_KAFKA_NAME)); - KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(PRIMARY_KAFKA_NAME)) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + 
kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(testStorage.getNamespaceName(), kafkaConnectPodName, TestConstants.DEFAULT_SINK_FILE_PATH, testStorage.getMessageCount()); } From f1430233687ff985bc5f2d60d9d1229c74ac4c1d Mon Sep 17 00:00:00 2001 From: Lukas Kral Date: Wed, 15 Apr 2026 16:36:38 +0200 Subject: [PATCH 2/4] use snapshot version for testing Signed-off-by: Lukas Kral --- pom.xml | 18 ++++++++++++++++-- .../systemtest/specific/RackAwarenessST.java | 19 ++++++++++++++++--- 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/pom.xml b/pom.xml index 61b470ab444..05dac25785c 100644 --- a/pom.xml +++ b/pom.xml @@ -39,6 +39,20 @@ + + + Central Portal Snapshots + central-portal-snapshots + https://central.sonatype.com/repository/maven-snapshots/ + + false + + + true + + + + UTF-8 21 @@ -121,7 +135,7 @@ 0.6.0 0.0.15 0.2.0 - 0.13.0 + 0.14.0-SNAPSHOT false ${skipTests} @@ -797,7 +811,7 @@ io.strimzi.test-clients builders - 0.14.0-SNAPSHOT + ${test-clients.version} provided diff --git a/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java b/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java index c6205a41ce2..ec3f22dbda7 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java @@ -424,9 +424,22 @@ void testKafkaEnvironmentVariableRackAwareness() { assertThat(AdminClientUtils.getRack(nodeDescription, "0").contains("rack-a"), is(true)); LOGGER.info("Producing and Consuming data in the Kafka cluster: {}/{}", testStorage.getNamespaceName(), testStorage.getClusterName()); - KafkaClients kafkaClients = ClientUtils.getInstantPlainClients(testStorage); - 
KubeResourceManager.get().createResourceWithWait(kafkaClients.producerStrimzi(), kafkaClients.consumerStrimzi()); - ClientUtils.waitForInstantClientSuccess(testStorage); + final KafkaProducerConsumer kafkaProducerConsumer = new KafkaProducerConsumerBuilder() + .withProducerName(testStorage.getProducerName()) + .withConsumerName(testStorage.getConsumerName()) + .withNamespaceName(testStorage.getNamespaceName()) + .withTopicName(testStorage.getTopicName()) + .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withMessageCount(testStorage.getMessageCount()) + .build(); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @BeforeAll From a2e1278765a84638ce1c76cc598535bf679c4ea1 Mon Sep 17 00:00:00 2001 From: Lukas Kral Date: Thu, 16 Apr 2026 15:48:50 +0200 Subject: [PATCH 3/4] Fix tests, add NetworkPolicy creation for Bridge clients, cleanup of unused stuff Signed-off-by: Lukas Kral --- .../specific/AdminClientTemplates.java | 279 ------------------ .../kubeUtils/objects/NetworkPolicyUtils.java | 16 +- .../systemtest/bridge/HttpBridgeST.java | 7 + .../bridge/HttpBridgeScramShaST.java | 7 + .../bridge/HttpBridgeServerTlsST.java | 7 + .../systemtest/bridge/HttpBridgeTlsST.java | 32 +- .../systemtest/connect/ConnectBuilderST.java | 2 +- .../strimzi/systemtest/metrics/MetricsST.java | 6 +- .../metrics/StrimziMetricsReporterST.java | 75 ++--- .../mirrormaker/MirrorMaker2ST.java | 6 +- .../systemtest/security/SecurityST.java | 8 +- .../security/oauth/OauthPlainST.java | 3 + .../systemtest/specific/RackAwarenessST.java | 25 +- .../systemtest/tracing/OpenTelemetryST.java | 8 + 14 files changed, 139 
insertions(+), 342 deletions(-) delete mode 100644 systemtest/src/main/java/io/strimzi/systemtest/templates/specific/AdminClientTemplates.java diff --git a/systemtest/src/main/java/io/strimzi/systemtest/templates/specific/AdminClientTemplates.java b/systemtest/src/main/java/io/strimzi/systemtest/templates/specific/AdminClientTemplates.java deleted file mode 100644 index 7942fe778f8..00000000000 --- a/systemtest/src/main/java/io/strimzi/systemtest/templates/specific/AdminClientTemplates.java +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright Strimzi authors. - * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). - */ -package io.strimzi.systemtest.templates.specific; - -import io.fabric8.kubernetes.api.model.EnvVar; -import io.fabric8.kubernetes.api.model.EnvVarBuilder; -import io.fabric8.kubernetes.api.model.LocalObjectReference; -import io.fabric8.kubernetes.api.model.PodSpecBuilder; -import io.fabric8.kubernetes.api.model.apps.Deployment; -import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder; -import io.skodjob.kubetest4j.resources.KubeResourceManager; -import io.strimzi.api.kafka.model.kafka.KafkaResources; -import io.strimzi.operator.common.Util; -import io.strimzi.systemtest.Environment; -import io.strimzi.systemtest.TestConstants; -import io.strimzi.systemtest.enums.DeploymentTypes; -import org.apache.kafka.common.security.auth.SecurityProtocol; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class AdminClientTemplates { - - private AdminClientTemplates() {} - - /////////////////////////////////////////// - // TLS (SSL) - /////////////////////////////////////////// - - /** - * Deploys an AdminClient with TLS enabled, with only essential configuration. - * - * @param namespaceName The Namespace in which to deploy the AdminClient and where Kafka resides. - * @param userName The Kafka userName to correctly configure TLS. 
- * @param adminName The name of the AdminClient deployment. - * @param clusterName The name of the Kafka cluster to which the AdminClient will connect. - * @param bootstrapName The name of the Kafka bootstrap server to use for the initial connection. - */ - public static Deployment tlsAdminClient(String namespaceName, String userName, String adminName, String clusterName, String bootstrapName) { - return tlsAdminClient(namespaceName, userName, adminName, clusterName, bootstrapName, ""); - } - - /** - * Creates a Deployment configuration for an AdminClient with TLS settings. - */ - public static Deployment tlsAdminClient(String namespaceName, String userName, String adminName, String clusterName, String bootstrapName, String additionalConfig) { - final List tlsEnvs = buildTLSUserCredentials(userName); - final String finalAdditionalConfig = "sasl.mechanism=GSSAPI\n" + "security.protocol=" + SecurityProtocol.SSL + "\n" + "\n" + additionalConfig; - - return defaultAdminClient(namespaceName, adminName, bootstrapName, finalAdditionalConfig) - .editOrNewSpec() - .editOrNewTemplate() - .editOrNewSpec() - .editFirstContainer() - .addToEnv(getClusterCaCertEnv(clusterName)) - .addAllToEnv(tlsEnvs) - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - /////////////////////////////////////////// - // SCRAM-SHA over TLS (SASL_SSL) - /////////////////////////////////////////// - - /** - * Deploys an AdminClient with SCRAM-SHA and TLS enabled and has only essential configuration. - * - * @param namespaceName The Namespace in which to deploy the AdminClient and where Kafka resides. - * @param userName The Kafka userName to correctly configure SCRAM-SHA. - * @param adminName The name of the AdminClient deployment. - * @param clusterName The name of the Kafka cluster to which the AdminClient will connect. - * @param bootstrapName The name of the Kafka bootstrap server to use for the initial connection. 
- */ - public static Deployment scramShaOverTlsAdminClient(String namespaceName, String userName, String adminName, String clusterName, String bootstrapName) { - return scramShaOverTlsAdminClient(namespaceName, userName, adminName, clusterName, bootstrapName, ""); - } - - /** - * Creates a Deployment configuration for an AdminClient with TLS and SCRAM_SHA settings. - */ - public static Deployment scramShaOverTlsAdminClient(String namespaceName, String userName, String adminName, String clusterName, String bootstrapName, String additionalConfig) { - String finalAdditionalConfig = getAdminClientScramConfig(namespaceName, userName, SecurityProtocol.SASL_SSL) + "\n" + additionalConfig; - // authenticating is taken care of (by SASL) thus only cluster needed - return defaultAdminClient(namespaceName, adminName, bootstrapName, finalAdditionalConfig) - .editOrNewSpec() - .editOrNewTemplate() - .editOrNewSpec() - .editFirstContainer() - .addToEnv(getClusterCaCertEnv(clusterName)) - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - } - - /////////////////////////////////////////// - // SCRAM-SHA over Plain (SASL_PLAINTEXT) - /////////////////////////////////////////// - - /** - * Deploys an AdminClient with SCRAM-SHA over PLAINTEXT and has only essential configuration. - * - * @param namespaceName The Namespace in which to deploy the AdminClient and where Kafka resides. - * @param userName The Kafka userName to correctly configure SCRAM-SHA. - * @param adminName The name of the AdminClient deployment. - * @param bootstrapName The name of the Kafka bootstrap server to use for the initial connection. - */ - public static Deployment scramShaOverPlainAdminClient(String namespaceName, String userName, String adminName, String bootstrapName) { - return scramShaOverPlainAdminClient(namespaceName, userName, adminName, bootstrapName, ""); - } - - /** - * Creates a Deployment configuration for an AdminClient with PLAINTEXT and SCRAM_SHA settings. 
- */ - public static Deployment scramShaOverPlainAdminClient(String namespaceName, String userName, String adminName, String bootstrapName, String additionalConfig) { - String finalAdditionalConfig = getAdminClientScramConfig(namespaceName, userName, SecurityProtocol.SASL_PLAINTEXT) + "\n" + additionalConfig; - return defaultAdminClient(namespaceName, adminName, bootstrapName, finalAdditionalConfig) - .build(); - } - - /////////////////////////////////////////// - // plain (PLAINTEXT) - /////////////////////////////////////////// - - /** - * Deploys an AdminClient with PLAINTEXT communication and no other configuration. - * - * @param namespaceName The Namespace in which to deploy the AdminClient and where Kafka resides. - * @param adminName The name of the AdminClient deployment. - * @param bootstrapName The name of the Kafka bootstrap server to use for the initial connection. - */ - public static DeploymentBuilder plainAdminClient(String namespaceName, String adminName, String bootstrapName) { - return defaultAdminClient(namespaceName, adminName, bootstrapName, ""); - } - - /////////////////////////////////////////// - // default admin client - /////////////////////////////////////////// - - /** - * Serves as base for all types pf admin clients (SCRAM_SHA, TLS, PLAINTEXT). 
- */ - private static DeploymentBuilder defaultAdminClient(String namespaceName, String adminName, String bootstrapName, String additionalConfig) { - Map adminLabels = new HashMap<>(); - adminLabels.put(TestConstants.APP_POD_LABEL, TestConstants.ADMIN_CLIENT_NAME); - adminLabels.put(TestConstants.KAFKA_ADMIN_CLIENT_LABEL_KEY, TestConstants.KAFKA_ADMIN_CLIENT_LABEL_VALUE); - adminLabels.put(TestConstants.DEPLOYMENT_TYPE, DeploymentTypes.AdminClient.name()); - adminLabels.put(TestConstants.APP_CONTROLLER_LABEL, adminName); - - PodSpecBuilder podSpecBuilder = new PodSpecBuilder(); - - if (Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET != null && !Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET.isEmpty()) { - List imagePullSecrets = Collections.singletonList(new LocalObjectReference(Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET)); - podSpecBuilder.withImagePullSecrets(imagePullSecrets); - } - - return new DeploymentBuilder() - .withNewMetadata() - .withNamespace(namespaceName) - .withLabels(adminLabels) - .withName(adminName) - .endMetadata() - .withNewSpec() - .withNewSelector() - .addToMatchLabels(adminLabels) - .endSelector() - .withNewTemplate() - .withNewMetadata() - .withName(adminName) - .withNamespace(namespaceName) - .withLabels(adminLabels) - .endMetadata() - .withNewSpecLike(podSpecBuilder.build()) - .addNewContainer() - .withName(adminName) - .withImagePullPolicy(TestConstants.IF_NOT_PRESENT_IMAGE_PULL_POLICY) - .withImage(Environment.TEST_CLIENTS_IMAGE) - .addNewEnv() - .withName("BOOTSTRAP_SERVERS") - .withValue(bootstrapName) - .endEnv() - .addNewEnv() - .withName("ADDITIONAL_CONFIG") - .withValue(additionalConfig) - .endEnv() - .addNewEnv() - // use custom config folder for admin-client, so we don't need to use service account etc. 
- .withName("CONFIG_FOLDER_PATH") - .withValue("/tmp") - .endEnv() - .withCommand("sleep") - .withArgs("infinity") - .endContainer() - .endSpec() - .endTemplate() - .endSpec(); - } - - /////////////////////////////////////////// - // Admin Client Configuration and envs - /////////////////////////////////////////// - - /** - * Constructs the (SASL) SCRAM configuration string from a secret for an admin client based on the user and security protocol. - * Works for SASL-PLAIN and SASL-SSL alike. - * - * @param namespace the namespace in which the secret is stored - * @param userName the name of the user (also used as the secret name) to fetch the SASL JAAS config - * @param securityProtocol the security protocol to use (either SASL_PLAINTEXT or SASL_SSL) - * @return a {@link String} containing the SASL mechanism, security protocol, and the SASL JAAS configuration - */ - private static String getAdminClientScramConfig(String namespace, String userName, SecurityProtocol securityProtocol) { - final String saslJaasConfigEncrypted = KubeResourceManager.get().kubeClient().getClient().secrets().inNamespace(namespace).withName(userName).get().getData().get("sasl.jaas.config"); - final String saslJaasConfigDecrypted = Util.decodeFromBase64(saslJaasConfigEncrypted); - - return "sasl.mechanism=SCRAM-SHA-512\n" + - "security.protocol=" + securityProtocol + "\n" + - "sasl.jaas.config=" + saslJaasConfigDecrypted; - } - - /** - * Creates an {@link EnvVar} environment variable for the Kafka cluster CA certificate, used for - * configuring Kafka clients to trust the cluster's CA, thereby enabling TlS and ScramSha over TlS communication. 
- * - * @param clusterName the name of the Kafka cluster, used to derive the name of the Kubernetes secret containing the CA certificate - * @return an {@link EnvVar} instance representing the CA certificate environment variable - */ - private static EnvVar getClusterCaCertEnv(String clusterName) { - return new EnvVarBuilder() - .withName("CA_CRT") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(KafkaResources.clusterCaCertificateSecretName(clusterName)) - .withKey("ca.crt") - .endSecretKeyRef() - .endValueFrom() - .build(); - } - - /** - * Generates a list of {@link EnvVar} environment variables for TLS configuration, including the user's certificate and key. - * These are extracted from Kubernetes secrets associated with the specified user. This setup is necessary for TLS (only) client - * authentication against a Kafka cluster. - * - * @param userName the name of the user, which corresponds to the Kubernetes secret names containing the user's TLS certificate and key - * @return a {@link List} of {@link EnvVar} instances for configuring a Kafka client with TLS - */ - private static List buildTLSUserCredentials(String userName) { - EnvVar userCertificate = new EnvVarBuilder() - .withName("USER_CRT") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(userName) - .withKey("user.crt") - .endSecretKeyRef() - .endValueFrom() - .build(); - - EnvVar userPrivateKey = new EnvVarBuilder() - .withName("USER_KEY") - .withNewValueFrom() - .withNewSecretKeyRef() - .withName(userName) - .withKey("user.key") - .endSecretKeyRef() - .endValueFrom() - .build(); - - return List.of(userCertificate, userPrivateKey); - } -} diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/kubeUtils/objects/NetworkPolicyUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/kubeUtils/objects/NetworkPolicyUtils.java index 0c96dc0b0d1..468c92a1d4a 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/utils/kubeUtils/objects/NetworkPolicyUtils.java +++ 
b/systemtest/src/main/java/io/strimzi/systemtest/utils/kubeUtils/objects/NetworkPolicyUtils.java @@ -12,6 +12,7 @@ import io.fabric8.kubernetes.client.CustomResource; import io.skodjob.kubetest4j.resources.KubeResourceManager; import io.strimzi.api.kafka.model.bridge.KafkaBridge; +import io.strimzi.api.kafka.model.bridge.KafkaBridgeResources; import io.strimzi.api.kafka.model.common.Spec; import io.strimzi.api.kafka.model.kafka.Status; import io.strimzi.systemtest.Environment; @@ -62,7 +63,20 @@ public static void allowNetworkPolicySettingsForClusterOperator(String namespace LOGGER.info("Network policy for LabelSelector {} successfully created", labelSelector); } - public static void allowNetworkPolicySettingsForBridgeClients(String namespace, String clientName, LabelSelector clientLabelSelector, String componentName) { + public static void allowNetworkPoliciesForBridgeClients(String namespaceName, String bridgeClusterName, String producerName, String consumerName) { + allowNetworkPolicyForBridgeClient(namespaceName, bridgeClusterName, producerName); + allowNetworkPolicyForBridgeClient(namespaceName, bridgeClusterName, consumerName); + } + + public static void allowNetworkPolicyForBridgeClient(String namespaceName, String bridgeClusterName, String clientName) { + LabelSelector clientLabelSelector = new LabelSelectorBuilder() + .addToMatchLabels("app", clientName) + .build(); + + allowNetworkPolicySettingsForBridgeClients(namespaceName, clientName, clientLabelSelector, KafkaBridgeResources.componentName(bridgeClusterName)); + } + + private static void allowNetworkPolicySettingsForBridgeClients(String namespace, String clientName, LabelSelector clientLabelSelector, String componentName) { LOGGER.info("Apply NetworkPolicy access to Kafka Bridge {} from client Pods with LabelSelector {}", componentName, clientLabelSelector); NetworkPolicy networkPolicy = NetworkPolicyTemplates.networkPolicyBuilder(namespace, clientName, clientLabelSelector) diff --git 
a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java index bef40298ee6..eb003be1e76 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeST.java @@ -37,6 +37,7 @@ import io.strimzi.systemtest.utils.VerificationUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaBridgeUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils; +import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; import io.strimzi.testclients.clients.http.HttpProducerConsumer; import io.strimzi.testclients.clients.http.HttpProducerConsumerBuilder; @@ -102,6 +103,9 @@ class HttpBridgeST extends AbstractST { void testSendSimpleMessage() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); + // Create NetworkPolicy for HTTP producer to access Bridge + NetworkPolicyUtils.allowNetworkPolicyForBridgeClient(testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), testStorage.getProducerName()); + final HttpProducerConsumer httpProducerConsumer = httpProducerConsumerBuilder .withProducerName(testStorage.getProducerName()) .withTopicName(testStorage.getTopicName()) @@ -144,6 +148,9 @@ void testSendSimpleMessage() { void testReceiveSimpleMessage() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); + // Create NetworkPolicy for HTTP consumer to access Bridge + NetworkPolicyUtils.allowNetworkPolicyForBridgeClient(testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), testStorage.getConsumerName()); + KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(Environment.TEST_SUITE_NAMESPACE, testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); final 
HttpProducerConsumer httpProducerConsumer = httpProducerConsumerBuilder diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java index 021796f572a..3b77be5595c 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeScramShaST.java @@ -31,6 +31,7 @@ import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; import io.strimzi.systemtest.templates.crd.KafkaUserTemplates; import io.strimzi.systemtest.utils.ClientUtils; +import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; import io.strimzi.testclients.clients.http.HttpProducerConsumer; import io.strimzi.testclients.clients.http.HttpProducerConsumerBuilder; import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; @@ -98,6 +99,9 @@ void testSendSimpleMessageTlsScramSha() { .withConsumerName(testStorage.getConsumerName()) .build(); + // Create NetworkPolicy for HTTP producer to access Bridge + NetworkPolicyUtils.allowNetworkPolicyForBridgeClient(testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), testStorage.getProducerName()); + // Create topic KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), suiteTestStorage.getClusterName()).build()); @@ -125,6 +129,9 @@ void testSendSimpleMessageTlsScramSha() { void testReceiveSimpleMessageTlsScramSha() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); + // Create NetworkPolicy for HTTP consumer to access Bridge + NetworkPolicyUtils.allowNetworkPolicyForBridgeClient(testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), testStorage.getConsumerName()); + final HttpProducerConsumer bridgeProducerConsumer = httpProducerConsumerBuilder .withTopicName(testStorage.getTopicName()) 
.withConsumerName(testStorage.getConsumerName()) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeServerTlsST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeServerTlsST.java index 1cddbd21282..2b6b17bab74 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeServerTlsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeServerTlsST.java @@ -25,6 +25,7 @@ import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; import io.strimzi.systemtest.templates.crd.KafkaUserTemplates; import io.strimzi.systemtest.utils.ClientUtils; +import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; import io.strimzi.testclients.clients.http.HttpProducerConsumer; import io.strimzi.testclients.clients.http.HttpProducerConsumerBuilder; import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; @@ -78,6 +79,9 @@ class HttpBridgeServerTlsST extends AbstractST { void testSendSimpleMessageTls() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); + // Create NetworkPolicy for HTTP producer to access Bridge + NetworkPolicyUtils.allowNetworkPolicyForBridgeClient(testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), testStorage.getProducerName()); + HttpProducerConsumer bridgeProducerConsumer = httpProducerConsumerBuilder .withTopicName(testStorage.getTopicName()) .withProducerName(testStorage.getProducerName()) @@ -117,6 +121,9 @@ void testSendSimpleMessageTls() { void testReceiveSimpleMessageTls() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); + // Create NetworkPolicy for HTTP consumer to access Bridge + NetworkPolicyUtils.allowNetworkPolicyForBridgeClient(testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), testStorage.getConsumerName()); + HttpProducerConsumer bridgeProducerConsumer = httpProducerConsumerBuilder 
.withTopicName(testStorage.getTopicName()) .withConsumerName(testStorage.getConsumerName()) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java index 14b1a93463f..5eb562b2a12 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/bridge/HttpBridgeTlsST.java @@ -37,8 +37,12 @@ import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; import io.strimzi.systemtest.templates.crd.KafkaUserTemplates; import io.strimzi.systemtest.utils.ClientUtils; +import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; +import io.strimzi.testclients.clients.http.HttpConsumerClient; +import io.strimzi.testclients.clients.http.HttpConsumerClientBuilder; import io.strimzi.testclients.clients.http.HttpProducerConsumer; import io.strimzi.testclients.clients.http.HttpProducerConsumerBuilder; +import io.strimzi.testclients.clients.kafka.KafkaProducerClientBuilder; import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.kafka.clients.consumer.ConsumerConfig; @@ -91,6 +95,9 @@ class HttpBridgeTlsST extends AbstractST { void testSendSimpleMessageTls() { final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); + // Create NetworkPolicy for HTTP producer to access Bridge + NetworkPolicyUtils.allowNetworkPolicyForBridgeClient(testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), testStorage.getProducerName()); + final HttpProducerConsumer httpProducerConsumer = httpProducerConsumerBuilder .withTopicName(testStorage.getTopicName()) .withProducerName(testStorage.getProducerName()) @@ -130,6 +137,9 @@ void testSendSimpleMessageTls() { void testReceiveSimpleMessageTls() { final TestStorage testStorage = new 
TestStorage(KubeResourceManager.get().getTestContext()); + // Create NetworkPolicy for HTTP consumer to access Bridge + NetworkPolicyUtils.allowNetworkPolicyForBridgeClient(testStorage.getNamespaceName(), suiteTestStorage.getClusterName(), testStorage.getConsumerName()); + final HttpProducerConsumer httpProducerConsumer = httpProducerConsumerBuilder .withTopicName(testStorage.getTopicName()) .withConsumerName(testStorage.getConsumerName()) @@ -208,7 +218,6 @@ void testTlsAuthWithWeirdUsername() { private void testWeirdUsername(String weirdUserName, KafkaListenerAuthentication auth, KafkaBridgeSpec spec, TestStorage testStorage) { - String bridgeProducerName = testStorage.getProducerName() + "-" + TestTags.BRIDGE; String bridgeConsumerName = testStorage.getConsumerName() + "-" + TestTags.BRIDGE; KubeResourceManager.get().createResourceWithWait( @@ -230,9 +239,11 @@ private void testWeirdUsername(String weirdUserName, KafkaListenerAuthentication .endSpec() .build()); - HttpProducerConsumer httpProducerConsumer = new HttpProducerConsumerBuilder() - .withProducerName(bridgeProducerName) - .withConsumerName(bridgeConsumerName) + // Create NetworkPolicy for HTTP consumer to access Bridge + NetworkPolicyUtils.allowNetworkPolicyForBridgeClient(testStorage.getNamespaceName(), testStorage.getClusterName(), bridgeConsumerName); + + HttpConsumerClient httpConsumer = new HttpConsumerClientBuilder() + .withName(bridgeConsumerName) .withHostname(KafkaBridgeResources.serviceName(testStorage.getClusterName())) .withTopicName(testStorage.getTopicName()) .withMessageCount(testStorage.getMessageCount()) @@ -262,11 +273,10 @@ private void testWeirdUsername(String weirdUserName, KafkaListenerAuthentication .endSpec() .build()); - KubeResourceManager.get().createResourceWithWait(httpProducerConsumer.getConsumer().getJob()); + KubeResourceManager.get().createResourceWithWait(httpConsumer.getJob()); - final KafkaProducerConsumerBuilder producerConsumerBuilder = new 
KafkaProducerConsumerBuilder() - .withProducerName(testStorage.getProducerName()) - .withConsumerName(testStorage.getConsumerName()) + final KafkaProducerClientBuilder kafkaProducerBuilder = new KafkaProducerClientBuilder() + .withName(testStorage.getProducerName()) .withNamespaceName(testStorage.getNamespaceName()) .withMessageCount(testStorage.getMessageCount()) .withTopicName(testStorage.getTopicName()) @@ -274,15 +284,15 @@ private void testWeirdUsername(String weirdUserName, KafkaListenerAuthentication if (auth.getType().equals(TestConstants.TLS_LISTENER_DEFAULT_NAME)) { // tls producer - producerConsumerBuilder + kafkaProducerBuilder .withAuthentication(ClientsAuthentication.configureTls(testStorage.getClusterName(), weirdUserName)); } else { // scram-sha producer - producerConsumerBuilder + kafkaProducerBuilder .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), weirdUserName, testStorage.getClusterName())); } - KubeResourceManager.get().createResourceWithWait(producerConsumerBuilder.build().getProducer().getJob()); + KubeResourceManager.get().createResourceWithWait(kafkaProducerBuilder.build().getJob()); ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java index 668dce1340b..5a6e8f5ccc1 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/connect/ConnectBuilderST.java @@ -311,7 +311,7 @@ void testBuildWithJarTgzAndZip() { ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); String connectPodName = PodUtils.listPodNames(testStorage.getNamespaceName(), testStorage.getKafkaConnectSelector()).get(0); - 
PodUtils.waitUntilMessageIsInPodLogs(testStorage.getNamespaceName(), connectPodName, "Received message with key 'null' and value 'Hello-world - 99'"); + PodUtils.waitUntilMessageIsInPodLogs(testStorage.getNamespaceName(), connectPodName, "Received message with key 'null' and value 'Hello world - 99'"); } @OpenShiftOnly diff --git a/systemtest/src/test/java/io/strimzi/systemtest/metrics/MetricsST.java b/systemtest/src/test/java/io/strimzi/systemtest/metrics/MetricsST.java index 972931c1330..d14a539993b 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/metrics/MetricsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/metrics/MetricsST.java @@ -11,7 +11,6 @@ import io.fabric8.kubernetes.api.model.ConfigMapKeySelector; import io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder; import io.fabric8.kubernetes.api.model.LabelSelector; -import io.fabric8.kubernetes.api.model.LabelSelectorBuilder; import io.skodjob.annotations.Desc; import io.skodjob.annotations.Label; import io.skodjob.annotations.Step; @@ -474,8 +473,9 @@ void testKafkaBridgeMetrics() { .withComponent(KafkaBridgeMetricsComponent.create(namespaceFirst, bridgeClusterName)) .build(); - NetworkPolicyUtils.allowNetworkPolicySettingsForBridgeClients(testStorage.getNamespaceName(), testStorage.getProducerName(), new LabelSelectorBuilder().addToMatchLabels("app", testStorage.getProducerName()).build(), KafkaBridgeResources.componentName(bridgeClusterName)); - NetworkPolicyUtils.allowNetworkPolicySettingsForBridgeClients(testStorage.getNamespaceName(), testStorage.getConsumerName(), new LabelSelectorBuilder().addToMatchLabels("app", testStorage.getConsumerName()).build(), KafkaBridgeResources.componentName(bridgeClusterName)); + // Create NetworkPolicies for HTTP clients to access Bridge + NetworkPolicyUtils.allowNetworkPoliciesForBridgeClients(namespaceFirst, bridgeClusterName, testStorage.getProducerName(), testStorage.getConsumerName()); + // Attach consumer before producer 
HttpProducerConsumer httpProducerConsumer = new HttpProducerConsumerBuilder() .withNamespaceName(namespaceFirst) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/metrics/StrimziMetricsReporterST.java b/systemtest/src/test/java/io/strimzi/systemtest/metrics/StrimziMetricsReporterST.java index a0b8d2ee572..4de51bf815a 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/metrics/StrimziMetricsReporterST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/metrics/StrimziMetricsReporterST.java @@ -93,7 +93,7 @@ public class StrimziMetricsReporterST extends AbstractST { private static final String MM2_CLUSTER_NAME = "my-mm2"; private static final String BRIDGE_NAME = "my-bridge"; - private TestStorage testStorage; + private TestStorage suiteTestStorage; private BaseMetricsCollector kafkaCollector; @ParallelTest @@ -136,8 +136,8 @@ void testKafkaMetrics() { ) void testKafkaConnectAndConnectorMetrics() { KubeResourceManager.get().createResourceWithWait( - KafkaConnectTemplates.kafkaConnectWithFilePlugin(testStorage.getNamespaceName(), CONNECT_CLUSTER_NAME, - testStorage.getClusterName(), 1) + KafkaConnectTemplates.kafkaConnectWithFilePlugin(suiteTestStorage.getNamespaceName(), CONNECT_CLUSTER_NAME, + suiteTestStorage.getClusterName(), 1) .editMetadata() .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true") .endMetadata() @@ -151,7 +151,7 @@ void testKafkaConnectAndConnectorMetrics() { ); KubeResourceManager.get().createResourceWithWait( - KafkaConnectorTemplates.kafkaConnector(testStorage.getNamespaceName(), CONNECT_CLUSTER_NAME).build() + KafkaConnectorTemplates.kafkaConnector(suiteTestStorage.getNamespaceName(), CONNECT_CLUSTER_NAME).build() ); BaseMetricsCollector kafkaConnectCollector = kafkaCollector.toBuilder() @@ -184,12 +184,12 @@ void testKafkaConnectAndConnectorMetrics() { ) void testMirrorMaker2Metrics() { KubeResourceManager.get().createResourceWithWait( - 
KafkaNodePoolTemplates.mixedPool(testStorage.getNamespaceName(), testStorage.getTargetBrokerPoolName(), - testStorage.getTargetClusterName(), TARGET_BROKER_REPLICAS).build() + KafkaNodePoolTemplates.mixedPool(suiteTestStorage.getNamespaceName(), suiteTestStorage.getTargetBrokerPoolName(), + suiteTestStorage.getTargetClusterName(), TARGET_BROKER_REPLICAS).build() ); KubeResourceManager.get().createResourceWithWait( - KafkaTemplates.kafka(testStorage.getNamespaceName(), testStorage.getTargetClusterName(), TARGET_BROKER_REPLICAS) + KafkaTemplates.kafka(suiteTestStorage.getNamespaceName(), suiteTestStorage.getTargetClusterName(), TARGET_BROKER_REPLICAS) .editSpec() .editEntityOperator() .withTopicOperator(null) @@ -199,8 +199,8 @@ void testMirrorMaker2Metrics() { ); KubeResourceManager.get().createResourceWithWait( - KafkaMirrorMaker2Templates.kafkaMirrorMaker2(testStorage.getNamespaceName(), MM2_CLUSTER_NAME, - testStorage.getClusterName(), testStorage.getTargetClusterName(), 1, false) + KafkaMirrorMaker2Templates.kafkaMirrorMaker2(suiteTestStorage.getNamespaceName(), MM2_CLUSTER_NAME, + suiteTestStorage.getClusterName(), suiteTestStorage.getTargetClusterName(), 1, false) .editOrNewSpec() .withNewStrimziMetricsReporterConfig() .withNewValues() @@ -238,9 +238,11 @@ void testMirrorMaker2Metrics() { } ) void testKafkaBridgeMetrics() { + final TestStorage testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); + KubeResourceManager.get().createResourceWithWait( - KafkaBridgeTemplates.kafkaBridge(testStorage.getNamespaceName(), BRIDGE_NAME, - KafkaResources.plainBootstrapAddress(testStorage.getClusterName()), 1) + KafkaBridgeTemplates.kafkaBridge(suiteTestStorage.getNamespaceName(), BRIDGE_NAME, + KafkaResources.plainBootstrapAddress(suiteTestStorage.getClusterName()), 1) .editSpec() .withNewStrimziMetricsReporterConfig() .withNewValues() @@ -251,17 +253,20 @@ void testKafkaBridgeMetrics() { ); // allow connections from scraper to Bridge pods when 
NetworkPolicies are set to denied by default - NetworkPolicyUtils.allowNetworkPolicySettingsForBridgeScraper(testStorage.getNamespaceName(), - testStorage.getScraperName(), KafkaBridgeResources.componentName(BRIDGE_NAME)); + NetworkPolicyUtils.allowNetworkPolicySettingsForBridgeScraper(suiteTestStorage.getNamespaceName(), + suiteTestStorage.getScraperName(), KafkaBridgeResources.componentName(BRIDGE_NAME)); + + // Create NetworkPolicies for HTTP clients to access Bridge + NetworkPolicyUtils.allowNetworkPoliciesForBridgeClients(suiteTestStorage.getNamespaceName(), BRIDGE_NAME, testStorage.getProducerName(), testStorage.getConsumerName()); // Attach consumer before producer HttpProducerConsumer httpProducerConsumer = new HttpProducerConsumerBuilder() - .withNamespaceName(testStorage.getNamespaceName()) + .withNamespaceName(suiteTestStorage.getNamespaceName()) .withProducerName(testStorage.getProducerName()) .withConsumerName(testStorage.getConsumerName()) .withHostname(KafkaBridgeResources.serviceName(BRIDGE_NAME)) - .withTopicName(testStorage.getTopicName()) - .withMessageCount(testStorage.getMessageCount()) + .withTopicName(suiteTestStorage.getTopicName()) + .withMessageCount(suiteTestStorage.getMessageCount()) .withPort(TestConstants.HTTP_BRIDGE_DEFAULT_PORT) .withDelayMs(200) .build(); @@ -273,7 +278,7 @@ void testKafkaBridgeMetrics() { ); BaseMetricsCollector bridgeCollector = kafkaCollector.toBuilder() - .withComponent(KafkaBridgeMetricsComponent.create(testStorage.getNamespaceName(), BRIDGE_NAME)) + .withComponent(KafkaBridgeMetricsComponent.create(suiteTestStorage.getNamespaceName(), BRIDGE_NAME)) .build(); bridgeCollector.collectMetricsFromPods(TestConstants.METRICS_COLLECT_TIMEOUT); @@ -308,11 +313,11 @@ void testDynamicReconfigurationAllowList() { // And check that `kafka_log_log_logendoffset` is not present assertThat(MetricsUtils.createPatternAndCollectWithoutWait(kafkaCollector, "kafka_log_log_logendoffset").isEmpty(), is(true)); - Map kafkaPods = 
PodUtils.podSnapshot(testStorage.getNamespaceName(), LabelSelectors.allKafkaPodsLabelSelector(testStorage.getClusterName())); + Map kafkaPods = PodUtils.podSnapshot(suiteTestStorage.getNamespaceName(), LabelSelectors.allKafkaPodsLabelSelector(suiteTestStorage.getClusterName())); // change configuration to disable kafka_server.* metrics and add kafka_controller.* metrics LOGGER.info("Change the allowList to allow `kafka_log.*`."); - KafkaUtils.replace(testStorage.getNamespaceName(), testStorage.getClusterName(), kafka -> { + KafkaUtils.replace(suiteTestStorage.getNamespaceName(), suiteTestStorage.getClusterName(), kafka -> { StrimziMetricsReporter config = (StrimziMetricsReporter) kafka.getSpec().getKafka().getMetricsConfig(); config.getValues().setAllowList(List.of("kafka_log.*")); @@ -320,7 +325,7 @@ void testDynamicReconfigurationAllowList() { } ); - RollingUpdateUtils.waitForNoRollingUpdate(testStorage.getNamespaceName(), LabelSelectors.allKafkaPodsLabelSelector(testStorage.getClusterName()), kafkaPods); + RollingUpdateUtils.waitForNoRollingUpdate(suiteTestStorage.getNamespaceName(), LabelSelectors.allKafkaPodsLabelSelector(suiteTestStorage.getClusterName()), kafkaPods); LOGGER.info("Check if Kafka Pods are missing the 'kafka_server_' metrics"); kafkaCollector.collectMetricsFromPods(TestConstants.METRICS_COLLECT_TIMEOUT); @@ -330,7 +335,7 @@ void testDynamicReconfigurationAllowList() { assertMetricValueNotNull(kafkaCollector, "kafka_log_log_logstartoffset\\{partition=\"0\",topic=\"__cluster_metadata\"\\}"); LOGGER.info("Changing back to previous state - allowing `kafka_server.*` metrics"); - KafkaUtils.replace(testStorage.getNamespaceName(), testStorage.getClusterName(), kafka -> { + KafkaUtils.replace(suiteTestStorage.getNamespaceName(), suiteTestStorage.getClusterName(), kafka -> { StrimziMetricsReporter config = (StrimziMetricsReporter) kafka.getSpec().getKafka().getMetricsConfig(); config.getValues().setAllowList(List.of("kafka_server.*")); @@ -338,7 +343,7 
@@ void testDynamicReconfigurationAllowList() { } ); - RollingUpdateUtils.waitForNoRollingUpdate(testStorage.getNamespaceName(), LabelSelectors.allKafkaPodsLabelSelector(testStorage.getClusterName()), kafkaPods); + RollingUpdateUtils.waitForNoRollingUpdate(suiteTestStorage.getNamespaceName(), LabelSelectors.allKafkaPodsLabelSelector(suiteTestStorage.getClusterName()), kafkaPods); LOGGER.info("Check if Kafka Pods are not missing the 'kafka_server_' metrics"); kafkaCollector.collectMetricsFromPods(TestConstants.METRICS_COLLECT_TIMEOUT); @@ -348,7 +353,7 @@ void testDynamicReconfigurationAllowList() { @BeforeAll void setupEnvironment() { - testStorage = new TestStorage(KubeResourceManager.get().getTestContext()); + suiteTestStorage = new TestStorage(KubeResourceManager.get().getTestContext()); // metrics tests are not designed to run with namespace RBAC scope assumeFalse(Environment.isNamespaceRbacScope()); @@ -358,17 +363,17 @@ void setupEnvironment() { .withDefaultConfiguration() .install(); - cluster.setNamespace(testStorage.getNamespaceName()); + cluster.setNamespace(suiteTestStorage.getNamespaceName()); KubeResourceManager.get().createResourceWithWait( - KafkaNodePoolTemplates.brokerPool(testStorage.getNamespaceName(), testStorage.getBrokerPoolName(), - testStorage.getClusterName(), BROKER_REPLICAS).build(), - KafkaNodePoolTemplates.controllerPool(testStorage.getNamespaceName(), testStorage.getControllerPoolName(), - testStorage.getClusterName(), 1).build() + KafkaNodePoolTemplates.brokerPool(suiteTestStorage.getNamespaceName(), suiteTestStorage.getBrokerPoolName(), + suiteTestStorage.getClusterName(), BROKER_REPLICAS).build(), + KafkaNodePoolTemplates.controllerPool(suiteTestStorage.getNamespaceName(), suiteTestStorage.getControllerPoolName(), + suiteTestStorage.getClusterName(), 1).build() ); KubeResourceManager.get().createResourceWithWait( - KafkaTemplates.kafka(testStorage.getNamespaceName(), testStorage.getClusterName(), BROKER_REPLICAS) + 
KafkaTemplates.kafka(suiteTestStorage.getNamespaceName(), suiteTestStorage.getClusterName(), BROKER_REPLICAS) .editSpec() .editKafka() .withNewStrimziMetricsReporterConfig() @@ -381,9 +386,9 @@ void setupEnvironment() { ); KubeResourceManager.get().createResourceWithoutWait( - ScraperTemplates.scraperPod(testStorage.getNamespaceName(), testStorage.getScraperName()).build(), - KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), - testStorage.getClusterName(), 5, BROKER_REPLICAS).build() + ScraperTemplates.scraperPod(suiteTestStorage.getNamespaceName(), suiteTestStorage.getScraperName()).build(), + KafkaTopicTemplates.topic(suiteTestStorage.getNamespaceName(), suiteTestStorage.getTopicName(), + suiteTestStorage.getClusterName(), 5, BROKER_REPLICAS).build() ); // wait some time for metrics to be stable, at least reconciliation interval + 10s @@ -393,9 +398,9 @@ void setupEnvironment() { kafkaCollector = new BaseMetricsCollector.Builder() .withScraperPodName(KubeResourceManager.get().kubeClient() - .listPodsByPrefixInName(testStorage.getNamespaceName(), testStorage.getScraperName()).get(0).getMetadata().getName()) - .withNamespaceName(testStorage.getNamespaceName()) - .withComponent(KafkaMetricsComponent.create(testStorage.getClusterName())) + .listPodsByPrefixInName(suiteTestStorage.getNamespaceName(), suiteTestStorage.getScraperName()).get(0).getMetadata().getName()) + .withNamespaceName(suiteTestStorage.getNamespaceName()) + .withComponent(KafkaMetricsComponent.create(suiteTestStorage.getClusterName())) .build(); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java b/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java index 2b177dd26fd..4ecbc648a18 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java @@ -506,7 +506,7 @@ void 
testMirrorMaker2TlsAndScramSha512Auth() { .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getSourceClusterName())) .withMessageCount(testStorage.getMessageCount()) - .withAuthentication(ClientsAuthentication.configureTls(testStorage.getSourceClusterName(), testStorage.getSourceUsername())) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getSourceUsername(), testStorage.getSourceClusterName())) .build(); KubeResourceManager.get().createResourceWithWait( @@ -550,7 +550,7 @@ void testMirrorMaker2TlsAndScramSha512Auth() { .withConsumerGroup(ClientUtils.generateRandomConsumerGroup()) .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName())) .withMessageCount(testStorage.getMessageCount()) - .withAuthentication(ClientsAuthentication.configureTls(testStorage.getTargetClusterName(), testStorage.getTargetUsername())) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getTargetUsername(), testStorage.getTargetClusterName())) .build(); KubeResourceManager.get().createResourceWithWait( @@ -567,7 +567,7 @@ void testMirrorMaker2TlsAndScramSha512Auth() { .withBootstrapAddress(KafkaResources.tlsBootstrapAddress(testStorage.getTargetClusterName())) .withName(testStorage.getAdminName()) .withNamespaceName(testStorage.getNamespaceName()) - .withAuthentication(ClientsAuthentication.configureTls(testStorage.getTargetClusterName(), testStorage.getTargetUsername())) + .withAuthentication(ClientsAuthentication.configureTlsScramSha(testStorage.getNamespaceName(), testStorage.getTargetUsername(), testStorage.getTargetClusterName())) .build(); KubeResourceManager.get().createResourceWithWait(kafkaAdminClient.getDeployment()); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/SecurityST.java 
b/systemtest/src/test/java/io/strimzi/systemtest/security/SecurityST.java index a84769c588b..87e493eef95 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/SecurityST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/SecurityST.java @@ -1073,8 +1073,12 @@ void testCaRenewalBreakInMiddle() { kafkaProducerConsumer.setConsumerGroup(ClientUtils.generateRandomConsumerGroup()); kafkaProducerConsumer.setTopicName(topicName); - KubeResourceManager.get().createResourceWithWait(kafkaProducerConsumer.getConsumer().getJob()); - ClientUtils.waitForClientSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getMessageCount()); + + KubeResourceManager.get().createResourceWithWait( + kafkaProducerConsumer.getProducer().getJob(), + kafkaProducerConsumer.getConsumer().getJob() + ); + ClientUtils.waitForClientsSuccess(testStorage.getNamespaceName(), testStorage.getConsumerName(), testStorage.getProducerName(), testStorage.getMessageCount()); } @ParallelNamespaceTest diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthPlainST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthPlainST.java index 07f14466b99..1cea151f9a4 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthPlainST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthPlainST.java @@ -556,6 +556,9 @@ void testProducerConsumerBridgeWithOauthMetrics() { String bridgeProducerName = "bridge-producer-" + testStorage.getClusterName(); + // Create NetworkPolicy for HTTP producer to access Bridge + NetworkPolicyUtils.allowNetworkPolicyForBridgeClient(Environment.TEST_SUITE_NAMESPACE, oauthClusterName, bridgeProducerName); + HttpProducerClient httpProducer = new HttpProducerClientBuilder() .withNamespaceName(Environment.TEST_SUITE_NAMESPACE) .withName(bridgeProducerName) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java 
b/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java index ec3f22dbda7..728538c5a42 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/specific/RackAwarenessST.java @@ -28,13 +28,14 @@ import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates; import io.strimzi.systemtest.templates.crd.KafkaTemplates; import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; -import io.strimzi.systemtest.templates.specific.AdminClientTemplates; import io.strimzi.systemtest.utils.AdminClientUtils; import io.strimzi.systemtest.utils.ClientUtils; import io.strimzi.systemtest.utils.StUtils; import io.strimzi.systemtest.utils.kafkaUtils.KafkaConnectUtils; import io.strimzi.systemtest.utils.kubeUtils.controllers.StrimziPodSetUtils; import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils; +import io.strimzi.testclients.clients.kafka.KafkaAdminClient; +import io.strimzi.testclients.clients.kafka.KafkaAdminClientBuilder; import io.strimzi.testclients.clients.kafka.KafkaProducerConsumer; import io.strimzi.testclients.clients.kafka.KafkaProducerConsumerBuilder; import org.apache.logging.log4j.LogManager; @@ -97,9 +98,14 @@ void testKafkaRackAwareness() { String podName = PodUtils.getPodNameByPrefix(testStorage.getNamespaceName(), testStorage.getBrokerComponentName()); Pod pod = KubeResourceManager.get().kubeClient().getClient().pods().inNamespace(testStorage.getNamespaceName()).withName(podName).get(); - KubeResourceManager.get().createResourceWithWait( - AdminClientTemplates.plainAdminClient(testStorage.getNamespaceName(), testStorage.getAdminName(), KafkaResources.plainBootstrapAddress(testStorage.getClusterName())).build() - ); + final KafkaAdminClient kafkaAdminClient = new KafkaAdminClientBuilder() + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withName(testStorage.getAdminName()) + 
.withNamespaceName(testStorage.getNamespaceName()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaAdminClient.getDeployment()); + final AdminClient adminClient = AdminClientUtils.getConfiguredAdminClient(testStorage.getNamespaceName(), testStorage.getAdminName()); // check that spec matches the actual pod configuration @@ -415,9 +421,14 @@ void testKafkaEnvironmentVariableRackAwareness() { .build()); LOGGER.info("Kafka cluster deployed successfully"); - KubeResourceManager.get().createResourceWithWait( - AdminClientTemplates.plainAdminClient(testStorage.getNamespaceName(), testStorage.getAdminName(), KafkaResources.plainBootstrapAddress(testStorage.getClusterName())).build() - ); + final KafkaAdminClient kafkaAdminClient = new KafkaAdminClientBuilder() + .withBootstrapAddress(KafkaResources.plainBootstrapAddress(testStorage.getClusterName())) + .withName(testStorage.getAdminName()) + .withNamespaceName(testStorage.getNamespaceName()) + .build(); + + KubeResourceManager.get().createResourceWithWait(kafkaAdminClient.getDeployment()); + final AdminClient adminClient = AdminClientUtils.getConfiguredAdminClient(testStorage.getNamespaceName(), testStorage.getAdminName()); String nodeDescription = adminClient.describeNodes("all"); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/tracing/OpenTelemetryST.java b/systemtest/src/test/java/io/strimzi/systemtest/tracing/OpenTelemetryST.java index 20532feec5f..e464da68768 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/tracing/OpenTelemetryST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/tracing/OpenTelemetryST.java @@ -27,6 +27,7 @@ import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates; import io.strimzi.systemtest.templates.specific.ScraperTemplates; import io.strimzi.systemtest.utils.ClientUtils; +import io.strimzi.systemtest.utils.kubeUtils.objects.NetworkPolicyUtils; import io.strimzi.systemtest.utils.specific.TracingUtils; import 
io.strimzi.testclients.clients.http.HttpProducerClient; import io.strimzi.testclients.clients.http.HttpProducerClientBuilder; @@ -369,6 +370,9 @@ void testKafkaBridgeService() { final String bridgeProducer = "bridge-producer"; KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName()).build()); + // Create NetworkPolicy for HTTP producer to access Bridge + NetworkPolicyUtils.allowNetworkPolicyForBridgeClient(testStorage.getNamespaceName(), testStorage.getClusterName(), bridgeProducer); + HttpProducerClient httpProducer = new HttpProducerClientBuilder() .withNamespaceName(testStorage.getNamespaceName()) .withName(bridgeProducer) @@ -437,6 +441,10 @@ void testKafkaBridgeServiceWithHttpTracing() { KubeResourceManager.get().createResourceWithWait(KafkaTopicTemplates.topic(testStorage.getNamespaceName(), testStorage.getTopicName(), testStorage.getClusterName()).build()); final String bridgeProducer = "bridge-producer"; + + // Create NetworkPolicy for HTTP producer to access Bridge + NetworkPolicyUtils.allowNetworkPolicyForBridgeClient(testStorage.getNamespaceName(), testStorage.getClusterName(), bridgeProducer); + HttpProducerClient httpProducer = new HttpProducerClientBuilder() .withNamespaceName(testStorage.getNamespaceName()) .withName(bridgeProducer) From 10eb10be9fa7b8a30bc4c27c9ee948da548b783b Mon Sep 17 00:00:00 2001 From: Lukas Kral Date: Fri, 17 Apr 2026 12:55:20 +0200 Subject: [PATCH 4/4] Add Javadoc to new methods Signed-off-by: Lukas Kral --- .../kafkaclients/ClientsAuthentication.java | 87 +++++++++++++++++++ .../kubeUtils/objects/NetworkPolicyUtils.java | 15 ++++ .../security/PodSecurityProfilesST.java | 7 ++ .../systemtest/tracing/OpenTelemetryST.java | 7 ++ 4 files changed, 116 insertions(+) diff --git a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/ClientsAuthentication.java 
b/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/ClientsAuthentication.java index e600cce7959..c7fb1b5523b 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/ClientsAuthentication.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/kafkaclients/ClientsAuthentication.java @@ -17,7 +17,21 @@ import java.util.ArrayList; import java.util.List; +/** + * Class containing methods for handling the creation of {@link Authentication} object with configuration of + * particular authentication. These methods are used in the Test-Clients builders for easier manipulation and creation + * of desired clients. + */ public class ClientsAuthentication { + /** + * Creates {@link Authentication} for SCRAM-SHA-512 over TLS. + * + * @param namespaceName Name of the Namespace where the KafkaUser's Secret with `sasl.jaas.config` is present. + * @param userName Name of the KafkaUser for which we should obtain the data. + * @param clusterName Name of the Kafka cluster for which the cluster CA certificate is obtained. + * + * @return configured {@link Authentication} with SCRAM-SHA-512 over TLS. + */ public static Authentication configureTlsScramSha(String namespaceName, String userName, String clusterName) { return configureScramSha(namespaceName, userName, SecurityProtocol.SASL_SSL) .withNewSsl() @@ -26,10 +40,28 @@ public static Authentication configureTlsScramSha(String namespaceName, String u .build(); } + /** + * Creates {@link Authentication} for SCRAM-SHA-512 over PLAIN. + * + * @param namespaceName Name of the Namespace where the KafkaUser's Secret with `sasl.jaas.config` is present. + * @param userName Name of the KafkaUser for which we should obtain the data. + * + * @return configured {@link Authentication} with SCRAM-SHA-512 over PLAIN. 
+ */ public static Authentication configurePlainScramSha(String namespaceName, String userName) { return configureScramSha(namespaceName, userName, SecurityProtocol.SASL_PLAINTEXT).build(); } + /** + * Default method for creating {@link AuthenticationBuilder} with SCRAM-SHA-512. + * This can be then configured to have a different security protocol and then extended with other configuration + * - for example the SSL config. + * + * @param namespaceName Name of the Namespace where the KafkaUser's Secret with `sasl.jaas.config` is present. + * @param userName Name of the KafkaUser for which we should obtain the data. + * @param securityProtocol Security protocol that should be used for the communication (e.g., {@code SASL_SSL} or {@code SASL_PLAINTEXT}). + * @return configured {@link AuthenticationBuilder} with SCRAM-SHA-512. + */ public static AuthenticationBuilder configureScramSha(String namespaceName, String userName, SecurityProtocol securityProtocol) { final String saslJaasConfigEncrypted = KubeResourceManager.get().kubeClient().getClient().secrets().inNamespace(namespaceName).withName(userName).get().getData().get("sasl.jaas.config"); final String saslJaasConfigDecrypted = Util.decodeFromBase64(saslJaasConfigEncrypted); @@ -42,14 +74,40 @@ public static AuthenticationBuilder configureScramSha(String namespaceName, Stri .withSecurityProtocol(securityProtocol.toString()); } + /** + * Creates {@link Authentication} for TLS - mainly created for usual Kafka cluster CA and TLS KafkaUser. + * + * @param clusterName Name of the Kafka cluster for which the cluster CA should be obtained. + * @param userName Name of the KafkaUser for which the keystore cert and key are obtained. + * + * @return configured {@link Authentication} with TLS configuration. + */ public static Authentication configureTls(String clusterName, String userName) { return configureTls(KafkaResources.clusterCaCertificateSecretName(clusterName), userName, userName); } + /** + * Creates {@link Authentication} for TLS with custom CA certificate and keystore Secret.
+ * + * @param caCertificateSecretName Name of the custom CA certificate Secret. + * @param keystoreSecretName Name of the keystore certificate Secret. + * + * @return configured {@link Authentication} with TLS configuration. + */ public static Authentication configureTlsCustomCerts(String caCertificateSecretName, String keystoreSecretName) { return configureTls(caCertificateSecretName, keystoreSecretName, keystoreSecretName); } + /** + * Default configuration method for configuring {@link Authentication} with TLS. + * Truststore and Keystore can be configured with different Secrets - mainly in cases when we have multiple custom certificates. + * + * @param caCertificateSecretName Name of the Secret containing CA certificate. + * @param keystoreKeySecretName Name of the Secret containing keystore key. + * @param keystoreCertificateChainSecretName Name of the Secret containing keystore certificate chain. + * + * @return configured {@link Authentication} with TLS configuration. + */ private static Authentication configureTls(String caCertificateSecretName, String keystoreKeySecretName, String keystoreCertificateChainSecretName) { return new AuthenticationBuilder() .withNewSsl() @@ -64,6 +122,16 @@ private static Authentication configureTls(String caCertificateSecretName, Strin .build(); } + /** + * Creates {@link Authentication} for TLS OAuth. + * + * @param clusterName Name of the Kafka cluster for which the cluster CA is obtained. + * @param oauthClientId Id of the OAuth client that is used for keystore, but also for OAuth related configuration. + * @param oauthClientSecret Name of the OAuth client Secret. + * @param oauthTokenEndpointUri Endpoint URI for obtaining the OAuth token. + * + * @return configured {@link Authentication} with TLS OAuth configuration. 
+ */ public static Authentication configureTlsOAuth(String clusterName, String oauthClientId, String oauthClientSecret, String oauthTokenEndpointUri) { EnvVar oauthSslEndpointEnvVar = new EnvVarBuilder() .withName("OAUTH_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM") @@ -79,10 +147,29 @@ public static Authentication configureTlsOAuth(String clusterName, String oauthC .build(); } + /** + * Creates {@link Authentication} with OAuth over plain. + * + * @param oauthClientId Id of the OAuth client that is used for OAuth related configuration. + * @param oauthClientSecret Name of the OAuth client Secret. + * @param oauthTokenEndpointUri Endpoint URI for obtaining the OAuth token. + * + * @return configured {@link Authentication} with Plain OAuth configuration. + */ public static Authentication configureOAuthPlain(String oauthClientId, String oauthClientSecret, String oauthTokenEndpointUri) { return configureOAuth(oauthClientId, oauthClientSecret, oauthTokenEndpointUri, null).build(); } + /** + * Default configuration method for configuring {@link AuthenticationBuilder} with OAuth. + * + * @param oauthClientId Id of the OAuth client that is used for OAuth related configuration. + * @param oauthClientSecret Name of the OAuth client Secret. + * @param oauthTokenEndpointUri Endpoint URI for obtaining the OAuth token. + * @param additionalEnvVars Additional OAuth related environment variables. + * + * @return configured {@link AuthenticationBuilder} with basic OAuth configuration. 
+ */ private static AuthenticationBuilder configureOAuth(String oauthClientId, String oauthClientSecret, String oauthTokenEndpointUri, List additionalEnvVars) { List envVars = new ArrayList<>(List.of( new EnvVarBuilder() diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/kubeUtils/objects/NetworkPolicyUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/kubeUtils/objects/NetworkPolicyUtils.java index 468c92a1d4a..63b2dd13653 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/utils/kubeUtils/objects/NetworkPolicyUtils.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/kubeUtils/objects/NetworkPolicyUtils.java @@ -63,11 +63,26 @@ public static void allowNetworkPolicySettingsForClusterOperator(String namespace LOGGER.info("Network policy for LabelSelector {} successfully created", labelSelector); } + /** + * Creates NetworkPolicies for allowing access to Bridge HTTP server for HTTP clients - both producer and consumer. + * + * @param namespaceName Namespace where the NetworkPolicies should be created. + * @param bridgeClusterName Name of the Bridge cluster. + * @param producerName Name of the HTTP producer. + * @param consumerName Name of the HTTP consumer. + */ public static void allowNetworkPoliciesForBridgeClients(String namespaceName, String bridgeClusterName, String producerName, String consumerName) { allowNetworkPolicyForBridgeClient(namespaceName, bridgeClusterName, producerName); allowNetworkPolicyForBridgeClient(namespaceName, bridgeClusterName, consumerName); } + /** + * Creates NetworkPolicy for allowing access to Bridge HTTP server for specified HTTP client. + * + * @param namespaceName Namespace where the NetworkPolicy should be created. + * @param bridgeClusterName Name of the Bridge cluster. + * @param clientName Name of the HTTP client. 
+ */ public static void allowNetworkPolicyForBridgeClient(String namespaceName, String bridgeClusterName, String clientName) { LabelSelector clientLabelSelector = new LabelSelectorBuilder() .addToMatchLabels("app", clientName) diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/PodSecurityProfilesST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/PodSecurityProfilesST.java index 75929ca6385..b42d60585a7 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/PodSecurityProfilesST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/PodSecurityProfilesST.java @@ -207,6 +207,13 @@ void testOperandsWithRestrictedSecurityProfile() { ClientUtils.waitForClientTimeout(testStorage.getNamespaceName(), testStorage.getProducerName(), testStorage.getMessageCount()); } + /** + * Method that applies "restricted" Pod Security Profile to the given client's Job. + * + * @param clientJob Client's Job that should be updated. + * + * @return updated client's Job with "restricted" Pod Security Profile. + */ private Job applyRestrictedSecurityProfileToClientJob(Job clientJob) { return new JobBuilder(clientJob) .editSpec() diff --git a/systemtest/src/test/java/io/strimzi/systemtest/tracing/OpenTelemetryST.java b/systemtest/src/test/java/io/strimzi/systemtest/tracing/OpenTelemetryST.java index e464da68768..91e05c48232 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/tracing/OpenTelemetryST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/tracing/OpenTelemetryST.java @@ -488,6 +488,13 @@ private TestStorage deployInitialResourcesAndGetTestStorage() { return testStorage; } + /** + * Method configuring the {@link Tracing} for particular client. + * + * @param serviceName Name of the service that should be used for {@link TracingConstants#OTEL_SERVICE_ENV} env. + * + * @return configured {@link Tracing}.
+ */ private Tracing tracingConfiguration(String serviceName) { return new TracingBuilder() .withServiceNameEnvVar(TracingConstants.OTEL_SERVICE_ENV)